From 3621e6fc68709a365f848e7a7a66c706c70bb4f4 Mon Sep 17 00:00:00 2001 From: yoshi-code-bot <70984784+yoshi-code-bot@users.noreply.github.com> Date: Tue, 8 Oct 2024 00:32:29 -0700 Subject: [PATCH] chore: Update discovery artifacts (#2495) ## Deleted keys were detected in the following stable discovery artifacts: androidpublisher v3 https://togithub.com/googleapis/google-api-python-client/commit/12accf27b14ce210a79763b4a992969be77a4e86 artifactregistry v1 https://togithub.com/googleapis/google-api-python-client/commit/b67926ca2a1466b5eb08000a25732a2a4c356e94 discoveryengine v1 https://togithub.com/googleapis/google-api-python-client/commit/4f2e432a75f08556bcd11298b9d7aa02632d4db1 securitycenter v1 https://togithub.com/googleapis/google-api-python-client/commit/208aca686f447fb6a0b4880ba8cd8c1301d77b58 ## Deleted keys were detected in the following pre-stable discovery artifacts: beyondcorp v1alpha https://togithub.com/googleapis/google-api-python-client/commit/2d7c6c3547d34b16d38235d4fe5c8f60c125b597 discoveryengine v1alpha https://togithub.com/googleapis/google-api-python-client/commit/4f2e432a75f08556bcd11298b9d7aa02632d4db1 merchantapi accounts_v1beta https://togithub.com/googleapis/google-api-python-client/commit/4da81ec26a0b1110447738c62aa1b8d32abc4a3f ## Discovery Artifact Change Summary: feat(androidpublisher): update the api https://togithub.com/googleapis/google-api-python-client/commit/12accf27b14ce210a79763b4a992969be77a4e86 feat(apigee): update the api https://togithub.com/googleapis/google-api-python-client/commit/98976690639cb4e26dd14a2ecfb163a816038029 feat(artifactregistry): update the api https://togithub.com/googleapis/google-api-python-client/commit/b67926ca2a1466b5eb08000a25732a2a4c356e94 feat(backupdr): update the api https://togithub.com/googleapis/google-api-python-client/commit/6890ede80c01c5a19a11a440862b46d2b3a9b568 feat(beyondcorp): update the api 
https://togithub.com/googleapis/google-api-python-client/commit/2d7c6c3547d34b16d38235d4fe5c8f60c125b597 feat(bigquery): update the api https://togithub.com/googleapis/google-api-python-client/commit/545012f098f8b0dfb13a1de12e8da9d02e4bbc83 feat(chat): update the api https://togithub.com/googleapis/google-api-python-client/commit/7a77a8abbc0023287c2960b498171e4019cb2297 feat(clouddeploy): update the api https://togithub.com/googleapis/google-api-python-client/commit/7d17fc9267efc6341cfe4e891d167476062b245f feat(container): update the api https://togithub.com/googleapis/google-api-python-client/commit/666ea1412e3d3b46a13baa5930bfdaf13eb9f513 feat(datamigration): update the api https://togithub.com/googleapis/google-api-python-client/commit/3c1f9a0a84c0c6f4585005c92fcfcf39c47df9c2 feat(dataproc): update the api https://togithub.com/googleapis/google-api-python-client/commit/38b605e8d76c1279ff13a2a064a396f2a0a3e0b4 fix(developerconnect): update the api https://togithub.com/googleapis/google-api-python-client/commit/80d154ad5116b25bf919c343abe5541f45da7640 feat(discoveryengine): update the api https://togithub.com/googleapis/google-api-python-client/commit/4f2e432a75f08556bcd11298b9d7aa02632d4db1 fix(factchecktools): update the api https://togithub.com/googleapis/google-api-python-client/commit/9040e465dfa143e1f766d11b5cdd38f4a4355e59 feat(firebaseappdistribution): update the api https://togithub.com/googleapis/google-api-python-client/commit/9beb0569c3eaa1f840b4fd6b2fd16690d6f6e82b feat(firebaseml): update the api https://togithub.com/googleapis/google-api-python-client/commit/8295bb91d003b3d8f1475fdc5a178e2edaf2800c feat(language): update the api https://togithub.com/googleapis/google-api-python-client/commit/0b0651fc6f9d61888967837def7bf35fd9e87bc0 feat(merchantapi): update the api https://togithub.com/googleapis/google-api-python-client/commit/4da81ec26a0b1110447738c62aa1b8d32abc4a3f feat(migrationcenter): update the api 
https://togithub.com/googleapis/google-api-python-client/commit/c9e15bad69db15b49fe16113aee0708b72f64d2e feat(monitoring): update the api https://togithub.com/googleapis/google-api-python-client/commit/d20011907dc747d4ed9fdcf6f448c82749faa876 feat(run): update the api https://togithub.com/googleapis/google-api-python-client/commit/7db81a1b546c2b2ef95562a25ddaa52bcba7410b feat(searchads360): update the api https://togithub.com/googleapis/google-api-python-client/commit/34b019673622e0b4300160dd1529ccc0e3f188a8 feat(securitycenter): update the api https://togithub.com/googleapis/google-api-python-client/commit/208aca686f447fb6a0b4880ba8cd8c1301d77b58 feat(texttospeech): update the api https://togithub.com/googleapis/google-api-python-client/commit/2d7bc0b238ca3a9ed5ccad25221948e1ea30a093 --- ...2_v2beta1.accounts.finalizedProposals.html | 96 +- ...hangebuyer2_v2beta1.accounts.products.html | 64 +- ...angebuyer2_v2beta1.accounts.proposals.html | 352 +- ...ns.featurestores.entityTypes.features.html | 6 +- ...ns.featurestores.entityTypes.features.html | 6 +- ...aiplatform_v1beta1.projects.locations.html | 2 +- docs/dyn/analyticsadmin_v1alpha.accounts.html | 4 +- ...dmin_v1alpha.properties.bigQueryLinks.html | 12 +- ...roidpublisher_v3.externaltransactions.html | 12 - docs/dyn/apigee_v1.organizations.html | 85 + ...e_v1.organizations.securityProfilesV2.html | 289 + ...ts.locations.repositories.attachments.html | 48 +- ...projects.locations.repositories.files.html | 4 +- ...ry_v1.projects.locations.repositories.html | 20 +- ...ations.repositories.packages.versions.html | 10 +- ...ations.repositories.packages.versions.html | 4 +- ...ations.repositories.packages.versions.html | 4 +- ...marketplace_v1.bidders.finalizedDeals.html | 16 +- ...smarketplace_v1.buyers.finalizedDeals.html | 96 +- ...marketplace_v1.buyers.proposals.deals.html | 96 +- ...tplace_v1alpha.bidders.finalizedDeals.html | 16 +- ...etplace_v1alpha.buyers.finalizedDeals.html | 96 +- 
...tplace_v1alpha.buyers.proposals.deals.html | 96 +- .../dyn/batch_v1.projects.locations.jobs.html | 8 +- ...ondcorp_v1.projects.locations.global_.html | 91 + ...global_.securityGateways.applications.html | 202 + ...ts.locations.global_.securityGateways.html | 91 + .../dyn/beyondcorp_v1.projects.locations.html | 10 + ...cations.securityGateways.applications.html | 221 + ...1.projects.locations.securityGateways.html | 416 + ...beyondcorp_v1alpha.projects.locations.html | 5 - docs/dyn/bigquery_v2.jobs.html | 30 + docs/dyn/bigquery_v2.models.html | 48 + ...sinessprofileperformance_v1.locations.html | 16 +- docs/dyn/calendar_v3.events.html | 26 +- docs/dyn/chat_v1.spaces.html | 457 +- docs/dyn/chat_v1.spaces.members.html | 36 +- docs/dyn/chat_v1.spaces.messages.html | 328 +- docs/dyn/chat_v1.spaces.spaceEvents.html | 648 +- ...ions.deliveryPipelines.automationRuns.html | 44 +- ...cations.deliveryPipelines.automations.html | 72 +- ....projects.locations.deliveryPipelines.html | 2 + ...s.deliveryPipelines.releases.rollouts.html | 3 + ..._v1.projects.locations.deployPolicies.html | 24 +- docs/dyn/cloudidentity_v1beta1.groups.html | 10 +- ...dkms_v1.projects.locations.keyHandles.html | 2 +- ...tainer_v1.projects.locations.clusters.html | 62 + ...projects.locations.clusters.nodePools.html | 3 + .../container_v1.projects.zones.clusters.html | 62 + ..._v1.projects.zones.clusters.nodePools.html | 3 + ...r_v1beta1.projects.locations.clusters.html | 62 + ...projects.locations.clusters.nodePools.html | 3 + ...ainer_v1beta1.projects.zones.clusters.html | 62 + ...ta1.projects.zones.clusters.nodePools.html | 3 + ...n_v1.projects.locations.migrationJobs.html | 110 + ...rojects.locations.entryGroups.entries.html | 2 +- ...ataproc_v1.projects.locations.batches.html | 5 + ...s.locations.batches.sparkApplications.html | 4334 ++++++ ...taproc_v1.projects.locations.sessions.html | 5 + ....locations.sessions.sparkApplications.html | 4334 ++++++ ....projects.locations.workflowTemplates.html 
| 140 +- ...dataproc_v1.projects.regions.clusters.html | 75 +- ....projects.regions.clusters.nodeGroups.html | 12 +- .../dataproc_v1.projects.regions.jobs.html | 16 +- ...v1.projects.regions.workflowTemplates.html | 140 +- ...s.collections.dataStores.customModels.html | 1 + ...ects.locations.collections.dataStores.html | 10 + ...rojects.locations.collections.engines.html | 5 + ...gine_v1.projects.locations.dataStores.html | 10 + ...discoveryengine_v1.projects.locations.html | 4 +- ...jects.locations.identityMappingStores.html | 91 + ...ions.identityMappingStores.operations.html | 187 + ...s.collections.dataStores.customModels.html | 1 + ...ects.locations.collections.dataStores.html | 10 + ...collections.dataStores.servingConfigs.html | 6 + ...rojects.locations.collections.engines.html | 7 + ...ns.collections.engines.servingConfigs.html | 6 + ...v1alpha.projects.locations.dataStores.html | 10 + ...s.locations.dataStores.servingConfigs.html | 6 + ...veryengine_v1alpha.projects.locations.html | 4 +- ...jects.locations.identityMappingStores.html | 91 + ...ions.identityMappingStores.operations.html | 187 + ...s.collections.dataStores.customModels.html | 1 + ...ects.locations.collections.dataStores.html | 10 + ...collections.dataStores.servingConfigs.html | 6 + ...rojects.locations.collections.engines.html | 7 + ...ns.collections.engines.servingConfigs.html | 6 + ..._v1beta.projects.locations.dataStores.html | 10 + ...s.locations.dataStores.servingConfigs.html | 6 + ...overyengine_v1beta.projects.locations.html | 5 + ...jects.locations.identityMappingStores.html | 91 + ...ions.identityMappingStores.operations.html | 187 + .../displayvideo_v3.advertisers.adGroups.html | 4 +- ...displayvideo_v3.advertisers.lineItems.html | 16 +- .../dyn/firebaseappcheck_v1.oauthClients.html | 12 +- .../firebaseappcheck_v1.projects.apps.html | 36 +- ...firebaseappcheck_v1.projects.services.html | 18 +- ...v1.projects.services.resourcePolicies.html | 32 +- 
.../firebaseappcheck_v1beta.oauthClients.html | 18 +- ...firebaseappcheck_v1beta.projects.apps.html | 60 +- ...baseappcheck_v1beta.projects.services.html | 14 +- ...ta.projects.services.resourcePolicies.html | 32 +- ..._v1alpha.projects.apps.releases.tests.html | 27 +- ....projects.locations.publishers.models.html | 25 +- docs/dyn/index.md | 4 - docs/dyn/language_v2.documents.html | 5 + .../merchantapi_accounts_v1beta.accounts.html | 17 +- ...ons_v1beta.accounts.conversionSources.html | 2 +- ...ta.accounts.notificationsubscriptions.html | 4 +- ...roducts_v1beta.accounts.productInputs.html | 4 +- ...api_products_v1beta.accounts.products.html | 6 +- ...oncenter_v1.projects.locations.assets.html | 14 +- ...jects.locations.reportConfigs.reports.html | 12 +- ...rojects.locations.sources.errorFrames.html | 4 +- ...er_v1alpha1.projects.locations.assets.html | 14 +- ...ha1.projects.locations.preferenceSets.html | 24 +- ...jects.locations.reportConfigs.reports.html | 42 +- ...rojects.locations.sources.errorFrames.html | 4 +- ...1.projects.location.prometheus.api.v1.html | 6 +- .../monitoring_v3.projects.alertPolicies.html | 156 + ...nessinformation_v1.accounts.locations.html | 144 +- ...usinessinformation_v1.googleLocations.html | 96 +- ...inessbusinessinformation_v1.locations.html | 192 +- docs/dyn/mybusinesslodging_v1.locations.html | 48 +- ...ybusinesslodging_v1.locations.lodging.html | 16 +- ...osconfig_v1.projects.patchDeployments.html | 64 +- ...nfig_v1beta.projects.patchDeployments.html | 64 +- .../realtimebidding_v1.bidders.creatives.html | 4 +- .../realtimebidding_v1.buyers.creatives.html | 24 +- .../dyn/run_v1.namespaces.configurations.html | 8 +- .../dyn/run_v1.namespaces.domainmappings.html | 8 +- docs/dyn/run_v1.namespaces.executions.html | 6 +- docs/dyn/run_v1.namespaces.jobs.html | 26 +- docs/dyn/run_v1.namespaces.revisions.html | 4 +- docs/dyn/run_v1.namespaces.routes.html | 4 +- docs/dyn/run_v1.namespaces.services.html | 24 +- 
docs/dyn/run_v1.namespaces.tasks.html | 4 +- ..._v1.projects.locations.configurations.html | 8 +- ..._v1.projects.locations.domainmappings.html | 8 +- .../run_v1.projects.locations.revisions.html | 4 +- .../dyn/run_v1.projects.locations.routes.html | 4 +- .../run_v1.projects.locations.services.html | 24 +- .../run_v2.projects.locations.services.html | 12 +- ...earchads360_v0.customers.searchAds360.html | 56 +- ...nter_v1.folders.locations.muteConfigs.html | 100 - ...HealthAnalyticsSettings.customModules.html | 24 + ...lyticsSettings.effectiveCustomModules.html | 6 + ...itycenter_v1.folders.sources.findings.html | 1 + ...1.organizations.locations.muteConfigs.html | 100 - ...HealthAnalyticsSettings.customModules.html | 24 + ...lyticsSettings.effectiveCustomModules.html | 6 + ...ter_v1.organizations.sources.findings.html | 1 + ...ter_v1.projects.locations.muteConfigs.html | 100 - ...HealthAnalyticsSettings.customModules.html | 24 + ...lyticsSettings.effectiveCustomModules.html | 6 + ...tycenter_v1.projects.sources.findings.html | 1 + ...servicemanagement_v1.services.configs.html | 8 +- docs/dyn/servicemanagement_v1.services.html | 2 +- docs/dyn/serviceusage_v1.services.html | 6 +- docs/dyn/serviceusage_v1beta1.services.html | 4 +- docs/dyn/storagetransfer_v1.transferJobs.html | 62 +- .../texttospeech_v1.projects.locations.html | 12 + docs/dyn/texttospeech_v1.text.html | 15 + docs/dyn/texttospeech_v1.voices.html | 43 + ...xttospeech_v1beta1.projects.locations.html | 12 + docs/dyn/texttospeech_v1beta1.text.html | 15 + docs/dyn/texttospeech_v1beta1.voices.html | 43 + ...flows_v1.projects.locations.workflows.html | 10 +- .../documents/adexchangebuyer2.v2beta1.json | 10 +- .../documents/aiplatform.v1.json | 8 +- .../documents/aiplatform.v1beta1.json | 10 +- .../documents/analyticsadmin.v1alpha.json | 4 +- .../documents/androidpublisher.v3.json | 17 +- .../discovery_cache/documents/apigee.v1.json | 323 +- .../documents/artifactregistry.v1.json | 36 +- 
.../documents/artifactregistry.v1beta1.json | 4 +- .../documents/artifactregistry.v1beta2.json | 4 +- .../authorizedbuyersmarketplace.v1.json | 10 +- .../authorizedbuyersmarketplace.v1alpha.json | 10 +- .../documents/backupdr.v1.json | 49 +- .../discovery_cache/documents/batch.v1.json | 4 +- .../documents/beyondcorp.v1.json | 748 +- .../documents/beyondcorp.v1alpha.json | 137 +- .../documents/bigquery.v2.json | 79 +- .../businessprofileperformance.v1.json | 18 +- .../documents/calendar.v3.json | 4 +- .../discovery_cache/documents/chat.v1.json | 117 +- .../documents/chromemanagement.v1.json | 10 +- .../documents/chromepolicy.v1.json | 6 +- .../documents/clouddeploy.v1.json | 101 +- .../documents/cloudidentity.v1beta1.json | 4 +- .../documents/cloudkms.v1.json | 4 +- .../documents/container.v1.json | 78 +- .../documents/container.v1beta1.json | 78 +- .../documents/datamigration.v1.json | 114 +- .../documents/dataplex.v1.json | 4 +- .../documents/dataproc.v1.json | 12731 +++++++++++----- .../documents/developerconnect.v1.json | 47 +- .../documents/discoveryengine.v1.json | 64 +- .../documents/discoveryengine.v1alpha.json | 64 +- .../documents/discoveryengine.v1beta.json | 124 +- .../documents/displayvideo.v2.json | 10 +- .../documents/displayvideo.v3.json | 12 +- .../documents/factchecktools.v1alpha1.json | 20 +- .../documents/firebaseappcheck.v1.json | 18 +- .../documents/firebaseappcheck.v1beta.json | 16 +- .../firebaseappdistribution.v1alpha.json | 54 +- .../documents/firebaseml.v2beta.json | 10 +- .../documents/language.v1.json | 28 +- .../documents/language.v1beta2.json | 28 +- .../documents/language.v2.json | 39 +- .../merchantapi.accounts_v1beta.json | 30 +- .../merchantapi.conversions_v1beta.json | 4 +- .../merchantapi.notifications_v1beta.json | 4 +- .../merchantapi.products_v1beta.json | 8 +- .../documents/migrationcenter.v1.json | 7 +- .../documents/migrationcenter.v1alpha1.json | 9 +- .../documents/monitoring.v1.json | 14 +- 
.../documents/monitoring.v3.json | 154 +- .../mybusinessbusinessinformation.v1.json | 10 +- .../documents/mybusinesslodging.v1.json | 10 +- .../documents/osconfig.v1.json | 10 +- .../documents/osconfig.v1beta.json | 10 +- .../documents/realtimebidding.v1.json | 6 +- .../discovery_cache/documents/run.v1.json | 10 +- .../discovery_cache/documents/run.v2.json | 14 +- .../documents/searchads360.v0.json | 102 +- .../documents/securitycenter.v1.json | 232 +- .../documents/securitycenter.v1beta1.json | 25 +- .../documents/securitycenter.v1beta2.json | 2 +- .../serviceconsumermanagement.v1.json | 4 +- .../serviceconsumermanagement.v1beta1.json | 4 +- .../documents/servicemanagement.v1.json | 4 +- .../documents/serviceusage.v1.json | 4 +- .../documents/serviceusage.v1beta1.json | 4 +- .../discovery_cache/documents/speech.v1.json | 8 +- .../documents/speech.v1p1beta1.json | 8 +- .../documents/storagetransfer.v1.json | 14 +- .../documents/texttospeech.v1.json | 179 +- .../documents/texttospeech.v1beta1.json | 181 +- .../documents/workflows.v1.json | 4 +- 241 files changed, 26900 insertions(+), 6142 deletions(-) create mode 100644 docs/dyn/apigee_v1.organizations.securityProfilesV2.html create mode 100644 docs/dyn/beyondcorp_v1.projects.locations.global_.html create mode 100644 docs/dyn/beyondcorp_v1.projects.locations.global_.securityGateways.applications.html create mode 100644 docs/dyn/beyondcorp_v1.projects.locations.global_.securityGateways.html create mode 100644 docs/dyn/beyondcorp_v1.projects.locations.securityGateways.applications.html create mode 100644 docs/dyn/beyondcorp_v1.projects.locations.securityGateways.html create mode 100644 docs/dyn/dataproc_v1.projects.locations.batches.sparkApplications.html create mode 100644 docs/dyn/dataproc_v1.projects.locations.sessions.sparkApplications.html create mode 100644 docs/dyn/discoveryengine_v1.projects.locations.identityMappingStores.html create mode 100644 
docs/dyn/discoveryengine_v1.projects.locations.identityMappingStores.operations.html create mode 100644 docs/dyn/discoveryengine_v1alpha.projects.locations.identityMappingStores.html create mode 100644 docs/dyn/discoveryengine_v1alpha.projects.locations.identityMappingStores.operations.html create mode 100644 docs/dyn/discoveryengine_v1beta.projects.locations.identityMappingStores.html create mode 100644 docs/dyn/discoveryengine_v1beta.projects.locations.identityMappingStores.operations.html diff --git a/docs/dyn/adexchangebuyer2_v2beta1.accounts.finalizedProposals.html b/docs/dyn/adexchangebuyer2_v2beta1.accounts.finalizedProposals.html index 02ba43ca621..b764e9b2cde 100644 --- a/docs/dyn/adexchangebuyer2_v2beta1.accounts.finalizedProposals.html +++ b/docs/dyn/adexchangebuyer2_v2beta1.accounts.finalizedProposals.html @@ -400,16 +400,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -444,16 +444,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -824,16 +824,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -868,16 +868,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1231,16 +1231,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1275,16 +1275,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], diff --git a/docs/dyn/adexchangebuyer2_v2beta1.accounts.products.html b/docs/dyn/adexchangebuyer2_v2beta1.accounts.products.html index 11c50839691..b64fff06aaa 100644 --- a/docs/dyn/adexchangebuyer2_v2beta1.accounts.products.html +++ b/docs/dyn/adexchangebuyer2_v2beta1.accounts.products.html @@ -154,16 +154,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -198,16 +198,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -373,16 +373,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -417,16 +417,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], diff --git a/docs/dyn/adexchangebuyer2_v2beta1.accounts.proposals.html b/docs/dyn/adexchangebuyer2_v2beta1.accounts.proposals.html index 16426817306..6cb92824901 100644 --- a/docs/dyn/adexchangebuyer2_v2beta1.accounts.proposals.html +++ b/docs/dyn/adexchangebuyer2_v2beta1.accounts.proposals.html @@ -413,16 +413,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -457,16 +457,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -854,16 +854,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -898,16 +898,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1263,16 +1263,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1307,16 +1307,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1655,16 +1655,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1699,16 +1699,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -2045,16 +2045,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -2089,16 +2089,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -2443,16 +2443,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -2487,16 +2487,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -2851,16 +2851,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -2895,16 +2895,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -3272,16 +3272,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -3316,16 +3316,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -3676,16 +3676,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -3720,16 +3720,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -4069,16 +4069,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -4113,16 +4113,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -4459,16 +4459,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -4503,16 +4503,16 @@

Method Details

{ # Daypart targeting message that specifies if the ad can be shown only during certain parts of a day/week. "dayOfWeek": "A String", # The day of the week to target. If unspecified, applicable to all days. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The ending time of the day for the ad to show (minute level granularity). The end time is exclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # The starting time of day for the ad to show (minute level granularity). 
The start time is inclusive. This field is not available for filtering in PQL queries. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], diff --git a/docs/dyn/aiplatform_v1.projects.locations.featurestores.entityTypes.features.html b/docs/dyn/aiplatform_v1.projects.locations.featurestores.entityTypes.features.html index e0f3a2cf055..2b430badade 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.featurestores.entityTypes.features.html +++ b/docs/dyn/aiplatform_v1.projects.locations.featurestores.entityTypes.features.html @@ -109,12 +109,12 @@

Method Details

Creates a batch of Features in a given EntityType.
 
 Args:
-  parent: string, Required. The resource name of the EntityType to create the batch of Features under. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` (required)
+  parent: string, Required. The resource name of the EntityType/FeatureGroup to create the batch of Features under. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` `projects/{project}/locations/{location}/featureGroups/{feature_group}` (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # Request message for FeaturestoreService.BatchCreateFeatures.
-  "requests": [ # Required. The request message specifying the Features to create. All Features must be created under the same parent EntityType. The `parent` field in each child request message can be omitted. If `parent` is set in a child request, then the value must match the `parent` value in this request message.
+{ # Request message for FeaturestoreService.BatchCreateFeatures. Request message for FeatureRegistryService.BatchCreateFeatures.
+  "requests": [ # Required. The request message specifying the Features to create. All Features must be created under the same parent EntityType / FeatureGroup. The `parent` field in each child request message can be omitted. If `parent` is set in a child request, then the value must match the `parent` value in this request message.
     { # Request message for FeaturestoreService.CreateFeature. Request message for FeatureRegistryService.CreateFeature.
       "feature": { # Feature Metadata information. For example, color is a feature that describes an apple. # Required. The Feature to create.
         "createTime": "A String", # Output only. Only applicable for Vertex AI Feature Store (Legacy). Timestamp when this EntityType was created.
diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.featurestores.entityTypes.features.html b/docs/dyn/aiplatform_v1beta1.projects.locations.featurestores.entityTypes.features.html
index 519846cf5b5..3a473505188 100644
--- a/docs/dyn/aiplatform_v1beta1.projects.locations.featurestores.entityTypes.features.html
+++ b/docs/dyn/aiplatform_v1beta1.projects.locations.featurestores.entityTypes.features.html
@@ -109,12 +109,12 @@ 

Method Details

Creates a batch of Features in a given EntityType.
 
 Args:
-  parent: string, Required. The resource name of the EntityType to create the batch of Features under. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` (required)
+  parent: string, Required. The resource name of the EntityType/FeatureGroup to create the batch of Features under. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` `projects/{project}/locations/{location}/featureGroups/{feature_group}` (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # Request message for FeaturestoreService.BatchCreateFeatures.
-  "requests": [ # Required. The request message specifying the Features to create. All Features must be created under the same parent EntityType. The `parent` field in each child request message can be omitted. If `parent` is set in a child request, then the value must match the `parent` value in this request message.
+{ # Request message for FeaturestoreService.BatchCreateFeatures. Request message for FeatureRegistryService.BatchCreateFeatures.
+  "requests": [ # Required. The request message specifying the Features to create. All Features must be created under the same parent EntityType / FeatureGroup. The `parent` field in each child request message can be omitted. If `parent` is set in a child request, then the value must match the `parent` value in this request message.
     { # Request message for FeaturestoreService.CreateFeature. Request message for FeatureRegistryService.CreateFeature.
       "feature": { # Feature Metadata information. For example, color is a feature that describes an apple. # Required. The Feature to create.
         "createTime": "A String", # Output only. Only applicable for Vertex AI Feature Store (Legacy). Timestamp when this EntityType was created.
diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.html b/docs/dyn/aiplatform_v1beta1.projects.locations.html
index afa53ced92d..b98d3cc6c55 100644
--- a/docs/dyn/aiplatform_v1beta1.projects.locations.html
+++ b/docs/dyn/aiplatform_v1beta1.projects.locations.html
@@ -1029,7 +1029,7 @@ 

Method Details

"contexts": [ # All its contexts. { # A context of the query. "distance": 3.14, # The distance between the query dense embedding vector and the context text vector. - "sourceUri": "A String", # For vertex RagStore, if the file is imported from Cloud Storage or Google Drive, source_uri will be original file URI in Cloud Storage or Google Drive; if file is uploaded, source_uri will be file display name. + "sourceUri": "A String", # If the file is imported from Cloud Storage or Google Drive, source_uri will be original file URI in Cloud Storage or Google Drive; if file is uploaded, source_uri will be file display name. "sparseDistance": 3.14, # The distance between the query sparse embedding vector and the context text vector. "text": "A String", # The text chunk. }, diff --git a/docs/dyn/analyticsadmin_v1alpha.accounts.html b/docs/dyn/analyticsadmin_v1alpha.accounts.html index a5885331d73..23f3004c4f6 100644 --- a/docs/dyn/analyticsadmin_v1alpha.accounts.html +++ b/docs/dyn/analyticsadmin_v1alpha.accounts.html @@ -706,7 +706,7 @@

Method Details

"freshDailyExportEnabled": True or False, # If set true, enables fresh daily export to the linked Google Cloud project. "includeAdvertisingId": True or False, # If set true, exported data will include advertising identifiers for mobile app streams. "name": "A String", # Output only. Resource name of this BigQuery link. Format: 'properties/{property_id}/bigQueryLinks/{bigquery_link_id}' Format: 'properties/1234/bigQueryLinks/abc567' - "project": "A String", # Immutable. The linked Google Cloud project resource name. Currently, this API always uses a project number, but may use project IDs in the future. Format: 'projects/{project number}' Example: 'projects/1234' + "project": "A String", # Immutable. The linked Google Cloud project. When creating a BigQueryLink, you may provide this resource name using either a project number or project ID. Once this resource has been created, the returned project will always have a project that contains a project number. Format: 'projects/{project number}' Example: 'projects/1234' "streamingExportEnabled": True or False, # If set true, enables streaming export to the linked Google Cloud project. }, "calculatedMetric": { # A definition for a calculated metric. # A snapshot of a CalculatedMetric resource in change history. @@ -1189,7 +1189,7 @@

Method Details

"freshDailyExportEnabled": True or False, # If set true, enables fresh daily export to the linked Google Cloud project. "includeAdvertisingId": True or False, # If set true, exported data will include advertising identifiers for mobile app streams. "name": "A String", # Output only. Resource name of this BigQuery link. Format: 'properties/{property_id}/bigQueryLinks/{bigquery_link_id}' Format: 'properties/1234/bigQueryLinks/abc567' - "project": "A String", # Immutable. The linked Google Cloud project resource name. Currently, this API always uses a project number, but may use project IDs in the future. Format: 'projects/{project number}' Example: 'projects/1234' + "project": "A String", # Immutable. The linked Google Cloud project. When creating a BigQueryLink, you may provide this resource name using either a project number or project ID. Once this resource has been created, the returned project will always have a project that contains a project number. Format: 'projects/{project number}' Example: 'projects/1234' "streamingExportEnabled": True or False, # If set true, enables streaming export to the linked Google Cloud project. }, "calculatedMetric": { # A definition for a calculated metric. # A snapshot of a CalculatedMetric resource in change history. diff --git a/docs/dyn/analyticsadmin_v1alpha.properties.bigQueryLinks.html b/docs/dyn/analyticsadmin_v1alpha.properties.bigQueryLinks.html index eff7c630e76..58a26b67f14 100644 --- a/docs/dyn/analyticsadmin_v1alpha.properties.bigQueryLinks.html +++ b/docs/dyn/analyticsadmin_v1alpha.properties.bigQueryLinks.html @@ -123,7 +123,7 @@

Method Details

"freshDailyExportEnabled": True or False, # If set true, enables fresh daily export to the linked Google Cloud project. "includeAdvertisingId": True or False, # If set true, exported data will include advertising identifiers for mobile app streams. "name": "A String", # Output only. Resource name of this BigQuery link. Format: 'properties/{property_id}/bigQueryLinks/{bigquery_link_id}' Format: 'properties/1234/bigQueryLinks/abc567' - "project": "A String", # Immutable. The linked Google Cloud project resource name. Currently, this API always uses a project number, but may use project IDs in the future. Format: 'projects/{project number}' Example: 'projects/1234' + "project": "A String", # Immutable. The linked Google Cloud project. When creating a BigQueryLink, you may provide this resource name using either a project number or project ID. Once this resource has been created, the returned project will always have a project that contains a project number. Format: 'projects/{project number}' Example: 'projects/1234' "streamingExportEnabled": True or False, # If set true, enables streaming export to the linked Google Cloud project. } @@ -148,7 +148,7 @@

Method Details

"freshDailyExportEnabled": True or False, # If set true, enables fresh daily export to the linked Google Cloud project. "includeAdvertisingId": True or False, # If set true, exported data will include advertising identifiers for mobile app streams. "name": "A String", # Output only. Resource name of this BigQuery link. Format: 'properties/{property_id}/bigQueryLinks/{bigquery_link_id}' Format: 'properties/1234/bigQueryLinks/abc567' - "project": "A String", # Immutable. The linked Google Cloud project resource name. Currently, this API always uses a project number, but may use project IDs in the future. Format: 'projects/{project number}' Example: 'projects/1234' + "project": "A String", # Immutable. The linked Google Cloud project. When creating a BigQueryLink, you may provide this resource name using either a project number or project ID. Once this resource has been created, the returned project will always have a project that contains a project number. Format: 'projects/{project number}' Example: 'projects/1234' "streamingExportEnabled": True or False, # If set true, enables streaming export to the linked Google Cloud project. }
@@ -198,7 +198,7 @@

Method Details

"freshDailyExportEnabled": True or False, # If set true, enables fresh daily export to the linked Google Cloud project. "includeAdvertisingId": True or False, # If set true, exported data will include advertising identifiers for mobile app streams. "name": "A String", # Output only. Resource name of this BigQuery link. Format: 'properties/{property_id}/bigQueryLinks/{bigquery_link_id}' Format: 'properties/1234/bigQueryLinks/abc567' - "project": "A String", # Immutable. The linked Google Cloud project resource name. Currently, this API always uses a project number, but may use project IDs in the future. Format: 'projects/{project number}' Example: 'projects/1234' + "project": "A String", # Immutable. The linked Google Cloud project. When creating a BigQueryLink, you may provide this resource name using either a project number or project ID. Once this resource has been created, the returned project will always have a project that contains a project number. Format: 'projects/{project number}' Example: 'projects/1234' "streamingExportEnabled": True or False, # If set true, enables streaming export to the linked Google Cloud project. }
@@ -234,7 +234,7 @@

Method Details

"freshDailyExportEnabled": True or False, # If set true, enables fresh daily export to the linked Google Cloud project. "includeAdvertisingId": True or False, # If set true, exported data will include advertising identifiers for mobile app streams. "name": "A String", # Output only. Resource name of this BigQuery link. Format: 'properties/{property_id}/bigQueryLinks/{bigquery_link_id}' Format: 'properties/1234/bigQueryLinks/abc567' - "project": "A String", # Immutable. The linked Google Cloud project resource name. Currently, this API always uses a project number, but may use project IDs in the future. Format: 'projects/{project number}' Example: 'projects/1234' + "project": "A String", # Immutable. The linked Google Cloud project. When creating a BigQueryLink, you may provide this resource name using either a project number or project ID. Once this resource has been created, the returned project will always have a project that contains a project number. Format: 'projects/{project number}' Example: 'projects/1234' "streamingExportEnabled": True or False, # If set true, enables streaming export to the linked Google Cloud project. }, ], @@ -278,7 +278,7 @@

Method Details

"freshDailyExportEnabled": True or False, # If set true, enables fresh daily export to the linked Google Cloud project. "includeAdvertisingId": True or False, # If set true, exported data will include advertising identifiers for mobile app streams. "name": "A String", # Output only. Resource name of this BigQuery link. Format: 'properties/{property_id}/bigQueryLinks/{bigquery_link_id}' Format: 'properties/1234/bigQueryLinks/abc567' - "project": "A String", # Immutable. The linked Google Cloud project resource name. Currently, this API always uses a project number, but may use project IDs in the future. Format: 'projects/{project number}' Example: 'projects/1234' + "project": "A String", # Immutable. The linked Google Cloud project. When creating a BigQueryLink, you may provide this resource name using either a project number or project ID. Once this resource has been created, the returned project will always have a project that contains a project number. Format: 'projects/{project number}' Example: 'projects/1234' "streamingExportEnabled": True or False, # If set true, enables streaming export to the linked Google Cloud project. } @@ -304,7 +304,7 @@

Method Details

"freshDailyExportEnabled": True or False, # If set true, enables fresh daily export to the linked Google Cloud project. "includeAdvertisingId": True or False, # If set true, exported data will include advertising identifiers for mobile app streams. "name": "A String", # Output only. Resource name of this BigQuery link. Format: 'properties/{property_id}/bigQueryLinks/{bigquery_link_id}' Format: 'properties/1234/bigQueryLinks/abc567' - "project": "A String", # Immutable. The linked Google Cloud project resource name. Currently, this API always uses a project number, but may use project IDs in the future. Format: 'projects/{project number}' Example: 'projects/1234' + "project": "A String", # Immutable. The linked Google Cloud project. When creating a BigQueryLink, you may provide this resource name using either a project number or project ID. Once this resource has been created, the returned project will always have a project that contains a project number. Format: 'projects/{project number}' Example: 'projects/1234' "streamingExportEnabled": True or False, # If set true, enables streaming export to the linked Google Cloud project. } diff --git a/docs/dyn/androidpublisher_v3.externaltransactions.html b/docs/dyn/androidpublisher_v3.externaltransactions.html index 006fccb896d..9bbb8ffa422 100644 --- a/docs/dyn/androidpublisher_v3.externaltransactions.html +++ b/docs/dyn/androidpublisher_v3.externaltransactions.html @@ -111,9 +111,6 @@

Method Details

"currency": "A String", # 3 letter Currency code, as defined by ISO 4217. See java/com/google/common/money/CurrencyCode.java "priceMicros": "A String", # Price in 1/million of the currency base unit, represented as a string. }, - "externalOfferInitialAcquisitionDetails": { # Details about the first time a user/device completed a transaction using external offers. # Optional. Details about the first time a user/device completed a transaction using external offers. Not required for transactions made using user choice billing or alternative billing only. - "externalTransactionId": "A String", # Required. The external transaction id of the first completed purchase made by the user. - }, "externalTransactionId": "A String", # Output only. The id of this transaction. All transaction ids under the same package name must be unique. Set when creating the external transaction. "oneTimeTransaction": { # Represents a one-time transaction. # This is a one-time transaction and not part of a subscription. "externalTransactionToken": "A String", # Input only. Provided during the call to Create. Retrieved from the client when the alternative billing flow is launched. @@ -167,9 +164,6 @@

Method Details

"currency": "A String", # 3 letter Currency code, as defined by ISO 4217. See java/com/google/common/money/CurrencyCode.java "priceMicros": "A String", # Price in 1/million of the currency base unit, represented as a string. }, - "externalOfferInitialAcquisitionDetails": { # Details about the first time a user/device completed a transaction using external offers. # Optional. Details about the first time a user/device completed a transaction using external offers. Not required for transactions made using user choice billing or alternative billing only. - "externalTransactionId": "A String", # Required. The external transaction id of the first completed purchase made by the user. - }, "externalTransactionId": "A String", # Output only. The id of this transaction. All transaction ids under the same package name must be unique. Set when creating the external transaction. "oneTimeTransaction": { # Represents a one-time transaction. # This is a one-time transaction and not part of a subscription. "externalTransactionToken": "A String", # Input only. Provided during the call to Create. Retrieved from the client when the alternative billing flow is launched. @@ -229,9 +223,6 @@

Method Details

"currency": "A String", # 3 letter Currency code, as defined by ISO 4217. See java/com/google/common/money/CurrencyCode.java "priceMicros": "A String", # Price in 1/million of the currency base unit, represented as a string. }, - "externalOfferInitialAcquisitionDetails": { # Details about the first time a user/device completed a transaction using external offers. # Optional. Details about the first time a user/device completed a transaction using external offers. Not required for transactions made using user choice billing or alternative billing only. - "externalTransactionId": "A String", # Required. The external transaction id of the first completed purchase made by the user. - }, "externalTransactionId": "A String", # Output only. The id of this transaction. All transaction ids under the same package name must be unique. Set when creating the external transaction. "oneTimeTransaction": { # Represents a one-time transaction. # This is a one-time transaction and not part of a subscription. "externalTransactionToken": "A String", # Input only. Provided during the call to Create. Retrieved from the client when the alternative billing flow is launched. @@ -307,9 +298,6 @@

Method Details

"currency": "A String", # 3 letter Currency code, as defined by ISO 4217. See java/com/google/common/money/CurrencyCode.java "priceMicros": "A String", # Price in 1/million of the currency base unit, represented as a string. }, - "externalOfferInitialAcquisitionDetails": { # Details about the first time a user/device completed a transaction using external offers. # Optional. Details about the first time a user/device completed a transaction using external offers. Not required for transactions made using user choice billing or alternative billing only. - "externalTransactionId": "A String", # Required. The external transaction id of the first completed purchase made by the user. - }, "externalTransactionId": "A String", # Output only. The id of this transaction. All transaction ids under the same package name must be unique. Set when creating the external transaction. "oneTimeTransaction": { # Represents a one-time transaction. # This is a one-time transaction and not part of a subscription. "externalTransactionToken": "A String", # Input only. Provided during the call to Create. Retrieved from the client when the alternative billing flow is launched. diff --git a/docs/dyn/apigee_v1.organizations.html b/docs/dyn/apigee_v1.organizations.html index bda1d85a84e..886c269452f 100644 --- a/docs/dyn/apigee_v1.organizations.html +++ b/docs/dyn/apigee_v1.organizations.html @@ -179,6 +179,11 @@

Instance Methods

Returns the securityProfiles Resource.

+

+ securityProfilesV2() +

+

Returns the securityProfilesV2 Resource.

+

sharedflows()

@@ -201,6 +206,9 @@

Instance Methods

get(name, x__xgafv=None)

Gets the profile for an Apigee organization. See [Understanding organizations](https://cloud.google.com/apigee/docs/api-platform/fundamentals/organization-structure).

+

+ getControlPlaneAccess(name, x__xgafv=None)

+

Lists the service accounts allowed to access Apigee control plane directly for limited functionality. **Note**: Available to Apigee hybrid only.

getDeployedIngressConfig(name, view=None, x__xgafv=None)

Gets the deployed ingress configuration for an organization.

@@ -228,6 +236,9 @@

Instance Methods

update(name, body=None, x__xgafv=None)

Updates the properties for an Apigee organization. No other fields in the organization profile will be updated.

+

+ updateControlPlaneAccess(name, body=None, updateMask=None, x__xgafv=None)

+

Updates the permissions required to allow Apigee runtime-plane components access to the control plane. Currently, the permissions required are to: 1. Allow runtime components to publish analytics data to the control plane. **Note**: Available to Apigee hybrid only.

updateSecuritySettings(name, body=None, updateMask=None, x__xgafv=None)

UpdateSecuritySettings updates the current security settings for API Security.

@@ -461,6 +472,31 @@

Method Details

} +
+ getControlPlaneAccess(name, x__xgafv=None) +
Lists the service accounts allowed to access Apigee control plane directly for limited functionality. **Note**: Available to Apigee hybrid only.
+
+Args:
+  name: string, Required. Resource name of the Control Plane Access. Use the following structure in your request: `organizations/{org}/controlPlaneAccess` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # ControlPlaneAccess is the request body and response body of UpdateControlPlaneAccess. and the response body of GetControlPlaneAccess. The input identities contains an array of service accounts to grant access to the respective control plane resource, with each service account specified using the following format: `serviceAccount:`***service-account-name***. The ***service-account-name*** is formatted like an email address. For example: `my-control-plane-service_account@my_project_id.iam.gserviceaccount.com` You might specify multiple service accounts, for example, if you have multiple environments and wish to assign a unique service account to each one.
+  "analyticsPublisherIdentities": [ # Optional. Array of service accounts authorized to publish analytics data to the control plane (for the Message Processor component).
+    "A String",
+  ],
+  "name": "A String", # Identifier. The resource name of the ControlPlaneAccess. Format: "organizations/{org}/controlPlaneAccess"
+  "synchronizerIdentities": [ # Optional. Array of service accounts to grant access to control plane resources (for the Synchronizer component). The service accounts must have **Apigee Synchronizer Manager** role. See also [Create service accounts](https://cloud.google.com/apigee/docs/hybrid/latest/sa-about#create-the-service-accounts).
+    "A String",
+  ],
+}
+
+
getDeployedIngressConfig(name, view=None, x__xgafv=None)
Gets the deployed ingress configuration for an organization.
@@ -890,6 +926,55 @@ 

Method Details

}
+
+ updateControlPlaneAccess(name, body=None, updateMask=None, x__xgafv=None) +
Updates the permissions required to allow Apigee runtime-plane components access to the control plane. Currently, the permissions required are to: 1. Allow runtime components to publish analytics data to the control plane. **Note**: Available to Apigee hybrid only.
+
+Args:
+  name: string, Identifier. The resource name of the ControlPlaneAccess. Format: "organizations/{org}/controlPlaneAccess" (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # ControlPlaneAccess is the request body and response body of UpdateControlPlaneAccess. and the response body of GetControlPlaneAccess. The input identities contains an array of service accounts to grant access to the respective control plane resource, with each service account specified using the following format: `serviceAccount:`***service-account-name***. The ***service-account-name*** is formatted like an email address. For example: `my-control-plane-service_account@my_project_id.iam.gserviceaccount.com` You might specify multiple service accounts, for example, if you have multiple environments and wish to assign a unique service account to each one.
+  "analyticsPublisherIdentities": [ # Optional. Array of service accounts authorized to publish analytics data to the control plane (for the Message Processor component).
+    "A String",
+  ],
+  "name": "A String", # Identifier. The resource name of the ControlPlaneAccess. Format: "organizations/{org}/controlPlaneAccess"
+  "synchronizerIdentities": [ # Optional. Array of service accounts to grant access to control plane resources (for the Synchronizer component). The service accounts must have **Apigee Synchronizer Manager** role. See also [Create service accounts](https://cloud.google.com/apigee/docs/hybrid/latest/sa-about#create-the-service-accounts).
+    "A String",
+  ],
+}
+
+  updateMask: string, List of fields to be updated. Fields that can be updated: synchronizer_identities, publisher_identities.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+
updateSecuritySettings(name, body=None, updateMask=None, x__xgafv=None)
UpdateSecuritySettings updates the current security settings for API Security.
diff --git a/docs/dyn/apigee_v1.organizations.securityProfilesV2.html b/docs/dyn/apigee_v1.organizations.securityProfilesV2.html
new file mode 100644
index 00000000000..53a1c2b3fa5
--- /dev/null
+++ b/docs/dyn/apigee_v1.organizations.securityProfilesV2.html
@@ -0,0 +1,289 @@
+
+
+
+

Apigee API . organizations . securityProfilesV2

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, securityProfileV2Id=None, x__xgafv=None)

+

Create a security profile v2.

+

+ delete(name, x__xgafv=None)

+

Delete a security profile v2.

+

+ get(name, x__xgafv=None)

+

Get a security profile v2.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

List security profiles v2.

+

+ list_next()

+

Retrieves the next page of results.

+

+ patch(name, body=None, updateMask=None, x__xgafv=None)

+

Update a security profile V2.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, securityProfileV2Id=None, x__xgafv=None) +
Create a security profile v2.
+
+Args:
+  parent: string, Required. The parent resource name. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Security profile for risk assessment version 2.
+  "createTime": "A String", # Output only. The time of the security profile creation.
+  "description": "A String", # Optional. The description of the security profile.
+  "googleDefined": True or False, # Output only. Whether the security profile is google defined.
+  "name": "A String", # Identifier. Name of the security profile v2 resource. Format: organizations/{org}/securityProfilesV2/{profile}
+  "profileAssessmentConfigs": { # Required. The configuration for each assessment in this profile. Key is the name/id of the assessment.
+    "a_key": { # The configuration definition for a specific assessment.
+      "weight": "A String", # The weight of the assessment.
+    },
+  },
+  "updateTime": "A String", # Output only. The time of the security profile update.
+}
+
+  securityProfileV2Id: string, Required. The security profile id.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Security profile for risk assessment version 2.
+  "createTime": "A String", # Output only. The time of the security profile creation.
+  "description": "A String", # Optional. The description of the security profile.
+  "googleDefined": True or False, # Output only. Whether the security profile is google defined.
+  "name": "A String", # Identifier. Name of the security profile v2 resource. Format: organizations/{org}/securityProfilesV2/{profile}
+  "profileAssessmentConfigs": { # Required. The configuration for each assessment in this profile. Key is the name/id of the assessment.
+    "a_key": { # The configuration definition for a specific assessment.
+      "weight": "A String", # The weight of the assessment.
+    },
+  },
+  "updateTime": "A String", # Output only. The time of the security profile update.
+}
+
+ +
+ delete(name, x__xgafv=None) +
Delete a security profile v2.
+
+Args:
+  name: string, Required. The name of the security profile v2 to delete. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
+ get(name, x__xgafv=None) +
Get a security profile v2.
+
+Args:
+  name: string, Required. The security profile id. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Security profile for risk assessment version 2.
+  "createTime": "A String", # Output only. The time of the security profile creation.
+  "description": "A String", # Optional. The description of the security profile.
+  "googleDefined": True or False, # Output only. Whether the security profile is google defined.
+  "name": "A String", # Identifier. Name of the security profile v2 resource. Format: organizations/{org}/securityProfilesV2/{profile}
+  "profileAssessmentConfigs": { # Required. The configuration for each assessment in this profile. Key is the name/id of the assessment.
+    "a_key": { # The configuration definition for a specific assessment.
+      "weight": "A String", # The weight of the assessment.
+    },
+  },
+  "updateTime": "A String", # Output only. The time of the security profile update.
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
List security profiles v2.
+
+Args:
+  parent: string, Required. For a specific organization, list of all the security profiles. Format: `organizations/{org}` (required)
+  pageSize: integer, Optional. The maximum number of profiles to return
+  pageToken: string, Optional. A page token, received from a previous `ListSecurityProfilesV2` call. Provide this to retrieve the subsequent page.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for ListSecurityProfilesV2.
+  "nextPageToken": "A String", # A token that can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "securityProfilesV2": [ # List of security profiles in the organization.
+    { # Security profile for risk assessment version 2.
+      "createTime": "A String", # Output only. The time of the security profile creation.
+      "description": "A String", # Optional. The description of the security profile.
+      "googleDefined": True or False, # Output only. Whether the security profile is google defined.
+      "name": "A String", # Identifier. Name of the security profile v2 resource. Format: organizations/{org}/securityProfilesV2/{profile}
+      "profileAssessmentConfigs": { # Required. The configuration for each assessment in this profile. Key is the name/id of the assessment.
+        "a_key": { # The configuration definition for a specific assessment.
+          "weight": "A String", # The weight of the assessment.
+        },
+      },
+      "updateTime": "A String", # Output only. The time of the security profile update.
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ patch(name, body=None, updateMask=None, x__xgafv=None) +
Update a security profile V2.
+
+Args:
+  name: string, Identifier. Name of the security profile v2 resource. Format: organizations/{org}/securityProfilesV2/{profile} (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Security profile for risk assessment version 2.
+  "createTime": "A String", # Output only. The time of the security profile creation.
+  "description": "A String", # Optional. The description of the security profile.
+  "googleDefined": True or False, # Output only. Whether the security profile is google defined.
+  "name": "A String", # Identifier. Name of the security profile v2 resource. Format: organizations/{org}/securityProfilesV2/{profile}
+  "profileAssessmentConfigs": { # Required. The configuration for each assessment in this profile. Key is the name/id of the assessment.
+    "a_key": { # The configuration definition for a specific assessment.
+      "weight": "A String", # The weight of the assessment.
+    },
+  },
+  "updateTime": "A String", # Output only. The time of the security profile update.
+}
+
+  updateMask: string, Required. The list of fields to update.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Security profile for risk assessment version 2.
+  "createTime": "A String", # Output only. The time of the security profile creation.
+  "description": "A String", # Optional. The description of the security profile.
+  "googleDefined": True or False, # Output only. Whether the security profile is google defined.
+  "name": "A String", # Identifier. Name of the security profile v2 resource. Format: organizations/{org}/securityProfilesV2/{profile}
+  "profileAssessmentConfigs": { # Required. The configuration for each assessment in this profile. Key is the name/id of the assessment.
+    "a_key": { # The configuration definition for a specific assessment.
+      "weight": "A String", # The weight of the assessment.
+    },
+  },
+  "updateTime": "A String", # Output only. The time of the security profile update.
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/artifactregistry_v1.projects.locations.repositories.attachments.html b/docs/dyn/artifactregistry_v1.projects.locations.repositories.attachments.html index c143d196d1b..4903206de44 100644 --- a/docs/dyn/artifactregistry_v1.projects.locations.repositories.attachments.html +++ b/docs/dyn/artifactregistry_v1.projects.locations.repositories.attachments.html @@ -79,16 +79,16 @@

Instance Methods

Close httplib2 connections.

create(parent, attachmentId=None, body=None, x__xgafv=None)

-

Creates an attachment. The returned Operation will finish once the attachment has been created. Its response will be the created Attachment.

+

Creates an attachment. The returned Operation will finish once the attachment has been created. Its response will be the created attachment.

delete(name, x__xgafv=None)

-

Deletes an attachment. The returned Operation will finish once the attachments has been deleted. It will not have any Operation metadata and will return a google.protobuf.Empty response.

+

Deletes an attachment. The returned Operation will finish once the attachments has been deleted. It will not have any Operation metadata and will return a `google.protobuf.Empty` response.

get(name, x__xgafv=None)

Gets an attachment.

list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists repositories.

+

Lists attachments.

list_next()

Retrieves the next page of results.

@@ -100,26 +100,26 @@

Method Details

create(parent, attachmentId=None, body=None, x__xgafv=None) -
Creates an attachment. The returned Operation will finish once the attachment has been created. Its response will be the created Attachment.
+  
Creates an attachment. The returned Operation will finish once the attachment has been created. Its response will be the created attachment.
 
 Args:
   parent: string, Required. The name of the parent resource where the attachment will be created. (required)
   body: object, The request body.
     The object takes the form of:
 
-{ # An Attachment refers to additional metadata that can be attached to artifacts in ArtifactRegistry. An attachment consists of one or more files.
-  "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Artifact Registry. See https://google.aip.dev/128#annotations for more details such as format and size limitations. Client specified annotations.
+{ # An Attachment refers to additional metadata that can be attached to artifacts in Artifact Registry. An attachment consists of one or more files.
+  "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Artifact Registry. See https://google.aip.dev/128#annotations for more details such as format and size limitations.
     "a_key": "A String",
   },
-  "attachmentNamespace": "A String", # The namespace this attachment belongs to. E.g. If an Attachment is created by artifact analysis, namespace is set to artifactanalysis.googleapis.com.
+  "attachmentNamespace": "A String", # The namespace this attachment belongs to. E.g. If an Attachment is created by artifact analysis, namespace is set to `artifactanalysis.googleapis.com`.
   "createTime": "A String", # Output only. The time when the attachment was created.
-  "files": [ # Required. The files that blong to this Attachment. If the file ID part contains slashes, they are escaped. E.g. "projects/p1/locations/us-central1/repositories/repo1/files/sha:".
+  "files": [ # Required. The files that belong to this attachment. If the file ID part contains slashes, they are escaped. E.g. `projects/p1/locations/us-central1/repositories/repo1/files/sha:`.
     "A String",
   ],
   "name": "A String", # The name of the attachment. E.g. "projects/p1/locations/us/repositories/repo/attachments/sbom".
-  "ociVersionName": "A String", # Output only. The name of the OCI version that this attachment created. Only populated for Docker attachments. E.g. "projects/p1/locations/us-central1/repositories/repo1/packages/p1/versions/v1".
+  "ociVersionName": "A String", # Output only. The name of the OCI version that this attachment created. Only populated for Docker attachments. E.g. `projects/p1/locations/us-central1/repositories/repo1/packages/p1/versions/v1`.
   "target": "A String", # Required. The target the attachment is for, can be a Version, Package or Repository. E.g. "projects/p1/locations/us-central1/repositories/repo1/packages/p1/versions/v1".
-  "type": "A String", # Type of Attachment. E.g. application/vnd.spdx+jsonn
+  "type": "A String", # Type of Attachment. E.g. `application/vnd.spdx+json`
   "updateTime": "A String", # Output only. The time when the attachment was last updated.
 }
 
@@ -155,7 +155,7 @@ 

Method Details

delete(name, x__xgafv=None) -
Deletes an attachment. The returned Operation will finish once the attachments has been deleted. It will not have any Operation metadata and will return a google.protobuf.Empty response.
+  
Deletes an attachment. The returned Operation will finish once the attachments has been deleted. It will not have any Operation metadata and will return a `google.protobuf.Empty` response.
 
 Args:
   name: string, Required. The name of the attachment to delete. (required)
@@ -202,26 +202,26 @@ 

Method Details

Returns: An object of the form: - { # An Attachment refers to additional metadata that can be attached to artifacts in ArtifactRegistry. An attachment consists of one or more files. - "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Artifact Registry. See https://google.aip.dev/128#annotations for more details such as format and size limitations. Client specified annotations. + { # An Attachment refers to additional metadata that can be attached to artifacts in Artifact Registry. An attachment consists of one or more files. + "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Artifact Registry. See https://google.aip.dev/128#annotations for more details such as format and size limitations. "a_key": "A String", }, - "attachmentNamespace": "A String", # The namespace this attachment belongs to. E.g. If an Attachment is created by artifact analysis, namespace is set to artifactanalysis.googleapis.com. + "attachmentNamespace": "A String", # The namespace this attachment belongs to. E.g. If an Attachment is created by artifact analysis, namespace is set to `artifactanalysis.googleapis.com`. "createTime": "A String", # Output only. The time when the attachment was created. - "files": [ # Required. The files that blong to this Attachment. If the file ID part contains slashes, they are escaped. E.g. "projects/p1/locations/us-central1/repositories/repo1/files/sha:". + "files": [ # Required. The files that belong to this attachment. If the file ID part contains slashes, they are escaped. E.g. `projects/p1/locations/us-central1/repositories/repo1/files/sha:`. "A String", ], "name": "A String", # The name of the attachment. E.g. "projects/p1/locations/us/repositories/repo/attachments/sbom". - "ociVersionName": "A String", # Output only. The name of the OCI version that this attachment created. Only populated for Docker attachments. E.g. 
"projects/p1/locations/us-central1/repositories/repo1/packages/p1/versions/v1". + "ociVersionName": "A String", # Output only. The name of the OCI version that this attachment created. Only populated for Docker attachments. E.g. `projects/p1/locations/us-central1/repositories/repo1/packages/p1/versions/v1`. "target": "A String", # Required. The target the attachment is for, can be a Version, Package or Repository. E.g. "projects/p1/locations/us-central1/repositories/repo1/packages/p1/versions/v1". - "type": "A String", # Type of Attachment. E.g. application/vnd.spdx+jsonn + "type": "A String", # Type of Attachment. E.g. `application/vnd.spdx+json` "updateTime": "A String", # Output only. The time when the attachment was last updated. }
list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists repositories.
+  
Lists attachments.
 
 Args:
   parent: string, Required. The name of the parent resource whose attachments will be listed. (required)
@@ -238,19 +238,19 @@ 

Method Details

{ # The response from listing attachments. "attachments": [ # The Attachments returned. - { # An Attachment refers to additional metadata that can be attached to artifacts in ArtifactRegistry. An attachment consists of one or more files. - "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Artifact Registry. See https://google.aip.dev/128#annotations for more details such as format and size limitations. Client specified annotations. + { # An Attachment refers to additional metadata that can be attached to artifacts in Artifact Registry. An attachment consists of one or more files. + "annotations": { # Optional. User annotations. These attributes can only be set and used by the user, and not by Artifact Registry. See https://google.aip.dev/128#annotations for more details such as format and size limitations. "a_key": "A String", }, - "attachmentNamespace": "A String", # The namespace this attachment belongs to. E.g. If an Attachment is created by artifact analysis, namespace is set to artifactanalysis.googleapis.com. + "attachmentNamespace": "A String", # The namespace this attachment belongs to. E.g. If an Attachment is created by artifact analysis, namespace is set to `artifactanalysis.googleapis.com`. "createTime": "A String", # Output only. The time when the attachment was created. - "files": [ # Required. The files that blong to this Attachment. If the file ID part contains slashes, they are escaped. E.g. "projects/p1/locations/us-central1/repositories/repo1/files/sha:". + "files": [ # Required. The files that belong to this attachment. If the file ID part contains slashes, they are escaped. E.g. `projects/p1/locations/us-central1/repositories/repo1/files/sha:`. "A String", ], "name": "A String", # The name of the attachment. E.g. "projects/p1/locations/us/repositories/repo/attachments/sbom". - "ociVersionName": "A String", # Output only. The name of the OCI version that this attachment created. 
Only populated for Docker attachments. E.g. "projects/p1/locations/us-central1/repositories/repo1/packages/p1/versions/v1". + "ociVersionName": "A String", # Output only. The name of the OCI version that this attachment created. Only populated for Docker attachments. E.g. `projects/p1/locations/us-central1/repositories/repo1/packages/p1/versions/v1`. "target": "A String", # Required. The target the attachment is for, can be a Version, Package or Repository. E.g. "projects/p1/locations/us-central1/repositories/repo1/packages/p1/versions/v1". - "type": "A String", # Type of Attachment. E.g. application/vnd.spdx+jsonn + "type": "A String", # Type of Attachment. E.g. `application/vnd.spdx+json` "updateTime": "A String", # Output only. The time when the attachment was last updated. }, ], diff --git a/docs/dyn/artifactregistry_v1.projects.locations.repositories.files.html b/docs/dyn/artifactregistry_v1.projects.locations.repositories.files.html index 4b37b8979ec..ea8200e0df6 100644 --- a/docs/dyn/artifactregistry_v1.projects.locations.repositories.files.html +++ b/docs/dyn/artifactregistry_v1.projects.locations.repositories.files.html @@ -100,7 +100,7 @@

Instance Methods

Updates a file.

upload(parent, body=None, media_body=None, media_mime_type=None, x__xgafv=None)

-

Directly uploads a File to a repository. The returned Operation will complete once the resources are uploaded.

+

Directly uploads a file to a repository. The returned Operation will complete once the resources are uploaded.

Method Details

close() @@ -323,7 +323,7 @@

Method Details

upload(parent, body=None, media_body=None, media_mime_type=None, x__xgafv=None) -
Directly uploads a File to a repository. The returned Operation will complete once the resources are uploaded.
+  
Directly uploads a file to a repository. The returned Operation will complete once the resources are uploaded.
 
 Args:
   parent: string, Required. The resource name of the repository where the file will be uploaded. (required)
diff --git a/docs/dyn/artifactregistry_v1.projects.locations.repositories.html b/docs/dyn/artifactregistry_v1.projects.locations.repositories.html
index 231299e1b05..b2460c2c0f4 100644
--- a/docs/dyn/artifactregistry_v1.projects.locations.repositories.html
+++ b/docs/dyn/artifactregistry_v1.projects.locations.repositories.html
@@ -244,8 +244,8 @@ 

Method Details

"repositoryPath": "A String", # A custom field to define a path to a specific repository from the base. }, }, - "commonRepository": { # Common remote repository settings type. # Common remote repository settings. Used as the RR upstream URL instead of Predefined and Custom remote repositories. UI and Gcloud will map all the new remote repositories to this field. - "uri": "A String", # Required. A common public repository base for Remote Repository. + "commonRepository": { # Common remote repository settings type. # Common remote repository settings. Used as the RemoteRepository upstream URL instead of Predefined and Custom remote repositories. Google Cloud Console and Google Cloud CLI will map all the new remote repositories to this field. + "uri": "A String", # Required. A common public repository base for remote repository. }, "description": "A String", # The description of the remote source. "disableUpstreamValidation": True or False, # Input only. A create/update remote repo option to avoid making a HEAD/GET request to validate a remote repo and any supplied upstream credentials. @@ -438,8 +438,8 @@

Method Details

"repositoryPath": "A String", # A custom field to define a path to a specific repository from the base. }, }, - "commonRepository": { # Common remote repository settings type. # Common remote repository settings. Used as the RR upstream URL instead of Predefined and Custom remote repositories. UI and Gcloud will map all the new remote repositories to this field. - "uri": "A String", # Required. A common public repository base for Remote Repository. + "commonRepository": { # Common remote repository settings type. # Common remote repository settings. Used as the RemoteRepository upstream URL instead of Predefined and Custom remote repositories. Google Cloud Console and Google Cloud CLI will map all the new remote repositories to this field. + "uri": "A String", # Required. A common public repository base for remote repository. }, "description": "A String", # The description of the remote source. "disableUpstreamValidation": True or False, # Input only. A create/update remote repo option to avoid making a HEAD/GET request to validate a remote repo and any supplied upstream credentials. @@ -610,8 +610,8 @@

Method Details

"repositoryPath": "A String", # A custom field to define a path to a specific repository from the base. }, }, - "commonRepository": { # Common remote repository settings type. # Common remote repository settings. Used as the RR upstream URL instead of Predefined and Custom remote repositories. UI and Gcloud will map all the new remote repositories to this field. - "uri": "A String", # Required. A common public repository base for Remote Repository. + "commonRepository": { # Common remote repository settings type. # Common remote repository settings. Used as the RemoteRepository upstream URL instead of Predefined and Custom remote repositories. Google Cloud Console and Google Cloud CLI will map all the new remote repositories to this field. + "uri": "A String", # Required. A common public repository base for remote repository. }, "description": "A String", # The description of the remote source. "disableUpstreamValidation": True or False, # Input only. A create/update remote repo option to avoid making a HEAD/GET request to validate a remote repo and any supplied upstream credentials. @@ -751,8 +751,8 @@

Method Details

"repositoryPath": "A String", # A custom field to define a path to a specific repository from the base. }, }, - "commonRepository": { # Common remote repository settings type. # Common remote repository settings. Used as the RR upstream URL instead of Predefined and Custom remote repositories. UI and Gcloud will map all the new remote repositories to this field. - "uri": "A String", # Required. A common public repository base for Remote Repository. + "commonRepository": { # Common remote repository settings type. # Common remote repository settings. Used as the RemoteRepository upstream URL instead of Predefined and Custom remote repositories. Google Cloud Console and Google Cloud CLI will map all the new remote repositories to this field. + "uri": "A String", # Required. A common public repository base for remote repository. }, "description": "A String", # The description of the remote source. "disableUpstreamValidation": True or False, # Input only. A create/update remote repo option to avoid making a HEAD/GET request to validate a remote repo and any supplied upstream credentials. @@ -875,8 +875,8 @@

Method Details

"repositoryPath": "A String", # A custom field to define a path to a specific repository from the base. }, }, - "commonRepository": { # Common remote repository settings type. # Common remote repository settings. Used as the RR upstream URL instead of Predefined and Custom remote repositories. UI and Gcloud will map all the new remote repositories to this field. - "uri": "A String", # Required. A common public repository base for Remote Repository. + "commonRepository": { # Common remote repository settings type. # Common remote repository settings. Used as the RemoteRepository upstream URL instead of Predefined and Custom remote repositories. Google Cloud Console and Google Cloud CLI will map all the new remote repositories to this field. + "uri": "A String", # Required. A common public repository base for remote repository. }, "description": "A String", # The description of the remote source. "disableUpstreamValidation": True or False, # Input only. A create/update remote repo option to avoid making a HEAD/GET request to validate a remote repo and any supplied upstream credentials. diff --git a/docs/dyn/artifactregistry_v1.projects.locations.repositories.packages.versions.html b/docs/dyn/artifactregistry_v1.projects.locations.repositories.packages.versions.html index fa6baf2dfb1..5869dde78fd 100644 --- a/docs/dyn/artifactregistry_v1.projects.locations.repositories.packages.versions.html +++ b/docs/dyn/artifactregistry_v1.projects.locations.repositories.packages.versions.html @@ -210,7 +210,7 @@

Method Details

"metadata": { # Output only. Repository-specific Metadata stored against this version. The fields returned are defined by the underlying repository-specific resource. Currently, the resources could be: DockerImage MavenArtifact "a_key": "", # Properties of the object. }, - "name": "A String", # The name of the version, for example: "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1". If the package or version ID parts contain slashes, the slashes are escaped. + "name": "A String", # The name of the version, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1`. If the package or version ID parts contain slashes, the slashes are escaped. "relatedTags": [ # Output only. A list of related tags. Will contain up to 100 tags that reference this version. { # Tags point to a version and represent an alternative name that can be used to access the version. "name": "A String", # The name of the tag, for example: "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/tags/tag1". If the package part contains slashes, the slashes are escaped. The tag part can only have characters in [a-zA-Z0-9\-._~:@], anything else must be URL encoded. @@ -256,7 +256,7 @@

Method Details

"metadata": { # Output only. Repository-specific Metadata stored against this version. The fields returned are defined by the underlying repository-specific resource. Currently, the resources could be: DockerImage MavenArtifact "a_key": "", # Properties of the object. }, - "name": "A String", # The name of the version, for example: "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1". If the package or version ID parts contain slashes, the slashes are escaped. + "name": "A String", # The name of the version, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1`. If the package or version ID parts contain slashes, the slashes are escaped. "relatedTags": [ # Output only. A list of related tags. Will contain up to 100 tags that reference this version. { # Tags point to a version and represent an alternative name that can be used to access the version. "name": "A String", # The name of the tag, for example: "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/tags/tag1". If the package part contains slashes, the slashes are escaped. The tag part can only have characters in [a-zA-Z0-9\-._~:@], anything else must be URL encoded. @@ -288,7 +288,7 @@

Method Details

Updates a version.
 
 Args:
-  name: string, The name of the version, for example: "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1". If the package or version ID parts contain slashes, the slashes are escaped. (required)
+  name: string, The name of the version, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1`. If the package or version ID parts contain slashes, the slashes are escaped. (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -301,7 +301,7 @@ 

Method Details

"metadata": { # Output only. Repository-specific Metadata stored against this version. The fields returned are defined by the underlying repository-specific resource. Currently, the resources could be: DockerImage MavenArtifact "a_key": "", # Properties of the object. }, - "name": "A String", # The name of the version, for example: "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1". If the package or version ID parts contain slashes, the slashes are escaped. + "name": "A String", # The name of the version, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1`. If the package or version ID parts contain slashes, the slashes are escaped. "relatedTags": [ # Output only. A list of related tags. Will contain up to 100 tags that reference this version. { # Tags point to a version and represent an alternative name that can be used to access the version. "name": "A String", # The name of the tag, for example: "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/tags/tag1". If the package part contains slashes, the slashes are escaped. The tag part can only have characters in [a-zA-Z0-9\-._~:@], anything else must be URL encoded. @@ -329,7 +329,7 @@

Method Details

"metadata": { # Output only. Repository-specific Metadata stored against this version. The fields returned are defined by the underlying repository-specific resource. Currently, the resources could be: DockerImage MavenArtifact "a_key": "", # Properties of the object. }, - "name": "A String", # The name of the version, for example: "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1". If the package or version ID parts contain slashes, the slashes are escaped. + "name": "A String", # The name of the version, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1`. If the package or version ID parts contain slashes, the slashes are escaped. "relatedTags": [ # Output only. A list of related tags. Will contain up to 100 tags that reference this version. { # Tags point to a version and represent an alternative name that can be used to access the version. "name": "A String", # The name of the tag, for example: "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/tags/tag1". If the package part contains slashes, the slashes are escaped. The tag part can only have characters in [a-zA-Z0-9\-._~:@], anything else must be URL encoded. diff --git a/docs/dyn/artifactregistry_v1beta1.projects.locations.repositories.packages.versions.html b/docs/dyn/artifactregistry_v1beta1.projects.locations.repositories.packages.versions.html index 87dd6f32110..949dff6e2db 100644 --- a/docs/dyn/artifactregistry_v1beta1.projects.locations.repositories.packages.versions.html +++ b/docs/dyn/artifactregistry_v1beta1.projects.locations.repositories.packages.versions.html @@ -153,7 +153,7 @@

Method Details

{ # The body of a version resource. A version resource represents a collection of components, such as files and other data. This may correspond to a version in many package management schemes. "createTime": "A String", # The time when the version was created. "description": "A String", # Optional. Description of the version, as specified in its metadata. - "name": "A String", # The name of the version, for example: "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1". If the package or version ID parts contain slashes, the slashes are escaped. + "name": "A String", # The name of the version, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1`. If the package or version ID parts contain slashes, the slashes are escaped. "relatedTags": [ # Output only. A list of related tags. Will contain up to 100 tags that reference this version. { # Tags point to a version and represent an alternative name that can be used to access the version. "name": "A String", # The name of the tag, for example: "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/tags/tag1". If the package part contains slashes, the slashes are escaped. The tag part can only have characters in [a-zA-Z0-9\-._~:@], anything else must be URL encoded. @@ -192,7 +192,7 @@

Method Details

{ # The body of a version resource. A version resource represents a collection of components, such as files and other data. This may correspond to a version in many package management schemes. "createTime": "A String", # The time when the version was created. "description": "A String", # Optional. Description of the version, as specified in its metadata. - "name": "A String", # The name of the version, for example: "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1". If the package or version ID parts contain slashes, the slashes are escaped. + "name": "A String", # The name of the version, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1`. If the package or version ID parts contain slashes, the slashes are escaped. "relatedTags": [ # Output only. A list of related tags. Will contain up to 100 tags that reference this version. { # Tags point to a version and represent an alternative name that can be used to access the version. "name": "A String", # The name of the tag, for example: "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/tags/tag1". If the package part contains slashes, the slashes are escaped. The tag part can only have characters in [a-zA-Z0-9\-._~:@], anything else must be URL encoded. diff --git a/docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.packages.versions.html b/docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.packages.versions.html index 131a074e4f5..3c5d7919b0c 100644 --- a/docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.packages.versions.html +++ b/docs/dyn/artifactregistry_v1beta2.projects.locations.repositories.packages.versions.html @@ -156,7 +156,7 @@

Method Details

"metadata": { # Output only. Repository-specific Metadata stored against this version. The fields returned are defined by the underlying repository-specific resource. Currently, the resources could be: DockerImage MavenArtifact "a_key": "", # Properties of the object. }, - "name": "A String", # The name of the version, for example: "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1". If the package or version ID parts contain slashes, the slashes are escaped. + "name": "A String", # The name of the version, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1`. If the package or version ID parts contain slashes, the slashes are escaped. "relatedTags": [ # Output only. A list of related tags. Will contain up to 100 tags that reference this version. { # Tags point to a version and represent an alternative name that can be used to access the version. "name": "A String", # The name of the tag, for example: "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/tags/tag1". If the package part contains slashes, the slashes are escaped. The tag part can only have characters in [a-zA-Z0-9\-._~:@], anything else must be URL encoded. @@ -198,7 +198,7 @@

Method Details

"metadata": { # Output only. Repository-specific Metadata stored against this version. The fields returned are defined by the underlying repository-specific resource. Currently, the resources could be: DockerImage MavenArtifact "a_key": "", # Properties of the object. }, - "name": "A String", # The name of the version, for example: "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1". If the package or version ID parts contain slashes, the slashes are escaped. + "name": "A String", # The name of the version, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1`. If the package or version ID parts contain slashes, the slashes are escaped. "relatedTags": [ # Output only. A list of related tags. Will contain up to 100 tags that reference this version. { # Tags point to a version and represent an alternative name that can be used to access the version. "name": "A String", # The name of the tag, for example: "projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/tags/tag1". If the package part contains slashes, the slashes are escaped. The tag part can only have characters in [a-zA-Z0-9\-._~:@], anything else must be URL encoded. diff --git a/docs/dyn/authorizedbuyersmarketplace_v1.bidders.finalizedDeals.html b/docs/dyn/authorizedbuyersmarketplace_v1.bidders.finalizedDeals.html index 0a0ea3cf35b..2f2355a1a8b 100644 --- a/docs/dyn/authorizedbuyersmarketplace_v1.bidders.finalizedDeals.html +++ b/docs/dyn/authorizedbuyersmarketplace_v1.bidders.finalizedDeals.html @@ -201,16 +201,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], diff --git a/docs/dyn/authorizedbuyersmarketplace_v1.buyers.finalizedDeals.html b/docs/dyn/authorizedbuyersmarketplace_v1.buyers.finalizedDeals.html index b26a5dda76e..f0b0a3e33d7 100644 --- a/docs/dyn/authorizedbuyersmarketplace_v1.buyers.finalizedDeals.html +++ b/docs/dyn/authorizedbuyersmarketplace_v1.buyers.finalizedDeals.html @@ -212,16 +212,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -472,16 +472,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -733,16 +733,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1012,16 +1012,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1273,16 +1273,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1534,16 +1534,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], diff --git a/docs/dyn/authorizedbuyersmarketplace_v1.buyers.proposals.deals.html b/docs/dyn/authorizedbuyersmarketplace_v1.buyers.proposals.deals.html index d5fd27d0ed4..e633f0d51ea 100644 --- a/docs/dyn/authorizedbuyersmarketplace_v1.buyers.proposals.deals.html +++ b/docs/dyn/authorizedbuyersmarketplace_v1.buyers.proposals.deals.html @@ -196,16 +196,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -432,16 +432,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -676,16 +676,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -917,16 +917,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1166,16 +1166,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1397,16 +1397,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], diff --git a/docs/dyn/authorizedbuyersmarketplace_v1alpha.bidders.finalizedDeals.html b/docs/dyn/authorizedbuyersmarketplace_v1alpha.bidders.finalizedDeals.html index 4cb42775a21..f2b59997983 100644 --- a/docs/dyn/authorizedbuyersmarketplace_v1alpha.bidders.finalizedDeals.html +++ b/docs/dyn/authorizedbuyersmarketplace_v1alpha.bidders.finalizedDeals.html @@ -201,16 +201,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], diff --git a/docs/dyn/authorizedbuyersmarketplace_v1alpha.buyers.finalizedDeals.html b/docs/dyn/authorizedbuyersmarketplace_v1alpha.buyers.finalizedDeals.html index e491e3c0a1e..17d9ced8ea0 100644 --- a/docs/dyn/authorizedbuyersmarketplace_v1alpha.buyers.finalizedDeals.html +++ b/docs/dyn/authorizedbuyersmarketplace_v1alpha.buyers.finalizedDeals.html @@ -212,16 +212,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -472,16 +472,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -733,16 +733,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1012,16 +1012,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1273,16 +1273,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1534,16 +1534,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], diff --git a/docs/dyn/authorizedbuyersmarketplace_v1alpha.buyers.proposals.deals.html b/docs/dyn/authorizedbuyersmarketplace_v1alpha.buyers.proposals.deals.html index 1268be967fd..29e74d1d625 100644 --- a/docs/dyn/authorizedbuyersmarketplace_v1alpha.buyers.proposals.deals.html +++ b/docs/dyn/authorizedbuyersmarketplace_v1alpha.buyers.proposals.deals.html @@ -196,16 +196,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -432,16 +432,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -676,16 +676,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -917,16 +917,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1166,16 +1166,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1397,16 +1397,16 @@

Method Details

{ # Defines targeting for a period of time on a specific week day. "dayOfWeek": "A String", # Day of week for the period. "endTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Hours in 24 hour time between 0 and 24, inclusive. Note: 24 is logically equivalent to 0, but is supported since in some cases there may need to be differentiation made between midnight on one day and midnight on the next day. Accepted values for minutes are [0, 15, 30, 45]. 0 is the only acceptable minute value for hour 24. Seconds and nanos are ignored. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], diff --git a/docs/dyn/batch_v1.projects.locations.jobs.html b/docs/dyn/batch_v1.projects.locations.jobs.html index 185e5cc2a46..1933f69a5a4 100644 --- a/docs/dyn/batch_v1.projects.locations.jobs.html +++ b/docs/dyn/batch_v1.projects.locations.jobs.html @@ -152,7 +152,7 @@

Method Details

"machineType": "A String", # The Compute Engine machine type. "minCpuPlatform": "A String", # The minimum CPU platform. See https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform. "provisioningModel": "A String", # The provisioning model. - "reservation": "A String", # Optional. If specified, VMs will consume only the specified reservation. If not specified (default), VMs will consume any applicable reservation. + "reservation": "A String", # Optional. If specified, VMs will consume only the specified reservation. If not specified (default), VMs will consume any applicable reservation. Additionally, VMs will not consume any reservation if "NO_RESERVATION" is specified. }, }, ], @@ -422,7 +422,7 @@

Method Details

"machineType": "A String", # The Compute Engine machine type. "minCpuPlatform": "A String", # The minimum CPU platform. See https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform. "provisioningModel": "A String", # The provisioning model. - "reservation": "A String", # Optional. If specified, VMs will consume only the specified reservation. If not specified (default), VMs will consume any applicable reservation. + "reservation": "A String", # Optional. If specified, VMs will consume only the specified reservation. If not specified (default), VMs will consume any applicable reservation. Additionally, VMs will not consume any reservation if "NO_RESERVATION" is specified. }, }, ], @@ -734,7 +734,7 @@

Method Details

"machineType": "A String", # The Compute Engine machine type. "minCpuPlatform": "A String", # The minimum CPU platform. See https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform. "provisioningModel": "A String", # The provisioning model. - "reservation": "A String", # Optional. If specified, VMs will consume only the specified reservation. If not specified (default), VMs will consume any applicable reservation. + "reservation": "A String", # Optional. If specified, VMs will consume only the specified reservation. If not specified (default), VMs will consume any applicable reservation. Additionally, VMs will not consume any reservation if "NO_RESERVATION" is specified. }, }, ], @@ -1015,7 +1015,7 @@

Method Details

"machineType": "A String", # The Compute Engine machine type. "minCpuPlatform": "A String", # The minimum CPU platform. See https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform. "provisioningModel": "A String", # The provisioning model. - "reservation": "A String", # Optional. If specified, VMs will consume only the specified reservation. If not specified (default), VMs will consume any applicable reservation. + "reservation": "A String", # Optional. If specified, VMs will consume only the specified reservation. If not specified (default), VMs will consume any applicable reservation. Additionally, VMs will not consume any reservation if "NO_RESERVATION" is specified. }, }, ], diff --git a/docs/dyn/beyondcorp_v1.projects.locations.global_.html b/docs/dyn/beyondcorp_v1.projects.locations.global_.html new file mode 100644 index 00000000000..9a96535b58f --- /dev/null +++ b/docs/dyn/beyondcorp_v1.projects.locations.global_.html @@ -0,0 +1,91 @@ + + + +

BeyondCorp API . projects . locations . global_

+

Instance Methods

+

+ securityGateways() +

+

Returns the securityGateways Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/beyondcorp_v1.projects.locations.global_.securityGateways.applications.html b/docs/dyn/beyondcorp_v1.projects.locations.global_.securityGateways.applications.html new file mode 100644 index 00000000000..48aaa885a83 --- /dev/null +++ b/docs/dyn/beyondcorp_v1.projects.locations.global_.securityGateways.applications.html @@ -0,0 +1,202 @@ + + + +

BeyondCorp API . projects . locations . global_ . securityGateways . applications

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, applicationId=None, body=None, requestId=None, x__xgafv=None)

+

Creates a new Application in a given project and location.

+

+ patch(name, body=None, requestId=None, updateMask=None, x__xgafv=None)

+

Updates the parameters of a single Application.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, applicationId=None, body=None, requestId=None, x__xgafv=None) +
Creates a new Application in a given project and location.
+
+Args:
+  parent: string, Required. The resource name of the parent SecurityGateway using the form: `projects/{project_id}/locations/global/securityGateways/{security_gateway_id}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A Beyondcorp Application resource information.
+  "createTime": "A String", # Output only. Timestamp when the resource was created.
+  "displayName": "A String", # Optional. An arbitrary user-provided name for the Application resource. Cannot exceed 64 characters.
+  "endpointMatchers": [ # Required. Endpoint matchers associated with an application. A combination of hostname and ports as endpoint matcher is used to match the application. Match conditions for OR logic. An array of match conditions to allow for multiple matching criteria. The rule is considered a match if one the conditions are met. The conditions can be one of the following combination (Hostname), (Hostname & Ports) EXAMPLES: Hostname - ("*.abc.com"), ("xyz.abc.com") Hostname and Ports - ("abc.com" and "22"), ("abc.com" and "22,33") etc
+    { # EndpointMatcher contains the information of the endpoint that will match the application.
+      "hostname": "A String", # Required. Hostname of the application.
+      "ports": [ # Optional. Ports of the application.
+        42,
+      ],
+    },
+  ],
+  "name": "A String", # Identifier. Name of the resource.
+  "updateTime": "A String", # Output only. Timestamp when the resource was last modified.
+}
+
+  applicationId: string, Optional. User-settable Application resource ID. * Must start with a letter. * Must contain between 4-63 characters from `/a-z-/`. * Must end with a number or letter.
+  requestId: string, Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore request if it has already been completed. The server will guarantee that for at least 60 minutes since the first request.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ patch(name, body=None, requestId=None, updateMask=None, x__xgafv=None) +
Updates the parameters of a single Application.
+
+Args:
+  name: string, Identifier. Name of the resource. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A Beyondcorp Application resource information.
+  "createTime": "A String", # Output only. Timestamp when the resource was created.
+  "displayName": "A String", # Optional. An arbitrary user-provided name for the Application resource. Cannot exceed 64 characters.
+  "endpointMatchers": [ # Required. Endpoint matchers associated with an application. A combination of hostname and ports as endpoint matcher is used to match the application. Match conditions for OR logic. An array of match conditions to allow for multiple matching criteria. The rule is considered a match if one the conditions are met. The conditions can be one of the following combination (Hostname), (Hostname & Ports) EXAMPLES: Hostname - ("*.abc.com"), ("xyz.abc.com") Hostname and Ports - ("abc.com" and "22"), ("abc.com" and "22,33") etc
+    { # EndpointMatcher contains the information of the endpoint that will match the application.
+      "hostname": "A String", # Required. Hostname of the application.
+      "ports": [ # Optional. Ports of the application.
+        42,
+      ],
+    },
+  ],
+  "name": "A String", # Identifier. Name of the resource.
+  "updateTime": "A String", # Output only. Timestamp when the resource was last modified.
+}
+
+  requestId: string, Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request timed out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  updateMask: string, Required. Mutable fields include: display_name.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/beyondcorp_v1.projects.locations.global_.securityGateways.html b/docs/dyn/beyondcorp_v1.projects.locations.global_.securityGateways.html new file mode 100644 index 00000000000..2fb0c98d7bf --- /dev/null +++ b/docs/dyn/beyondcorp_v1.projects.locations.global_.securityGateways.html @@ -0,0 +1,91 @@ + + + +

BeyondCorp API . projects . locations . global_ . securityGateways

+

Instance Methods

+

+ applications() +

+

Returns the applications Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/beyondcorp_v1.projects.locations.html b/docs/dyn/beyondcorp_v1.projects.locations.html index 175468c9ddc..7d36acf3900 100644 --- a/docs/dyn/beyondcorp_v1.projects.locations.html +++ b/docs/dyn/beyondcorp_v1.projects.locations.html @@ -99,11 +99,21 @@

Instance Methods

Returns the clientGateways Resource.

+

+ global_() +

+

Returns the global_ Resource.

+

operations()

Returns the operations Resource.

+

+ securityGateways() +

+

Returns the securityGateways Resource.

+

close()

Close httplib2 connections.

diff --git a/docs/dyn/beyondcorp_v1.projects.locations.securityGateways.applications.html b/docs/dyn/beyondcorp_v1.projects.locations.securityGateways.applications.html new file mode 100644 index 00000000000..955fcc2a8cc --- /dev/null +++ b/docs/dyn/beyondcorp_v1.projects.locations.securityGateways.applications.html @@ -0,0 +1,221 @@ + + + +

BeyondCorp API . projects . locations . securityGateways . applications

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ delete(name, requestId=None, validateOnly=None, x__xgafv=None)

+

Deletes a single Application.

+

+ get(name, x__xgafv=None)

+

Gets details of a single Application.

+

+ list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists Applications in a given project and location.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ delete(name, requestId=None, validateOnly=None, x__xgafv=None) +
Deletes a single Application.
+
+Args:
+  name: string, Required. Name of the resource. (required)
+  requestId: string, Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  validateOnly: boolean, Optional. If set, validates request by executing a dry-run which would not alter the resource in any way.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets details of a single Application.
+
+Args:
+  name: string, Required. The resource name of the Application using the form: `projects/{project_id}/locations/global/securityGateway/{security_gateway_id}/applications/{application_id}` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A Beyondcorp Application resource information.
+  "createTime": "A String", # Output only. Timestamp when the resource was created.
+  "displayName": "A String", # Optional. An arbitrary user-provided name for the Application resource. Cannot exceed 64 characters.
+  "endpointMatchers": [ # Required. Endpoint matchers associated with an application. A combination of hostname and ports as endpoint matcher is used to match the application. Match conditions for OR logic. An array of match conditions to allow for multiple matching criteria. The rule is considered a match if one of the conditions is met. The conditions can be one of the following combinations: (Hostname), (Hostname & Ports) EXAMPLES: Hostname - ("*.abc.com"), ("xyz.abc.com") Hostname and Ports - ("abc.com" and "22"), ("abc.com" and "22,33") etc
+    { # EndpointMatcher contains the information of the endpoint that will match the application.
+      "hostname": "A String", # Required. Hostname of the application.
+      "ports": [ # Optional. Ports of the application.
+        42,
+      ],
+    },
+  ],
+  "name": "A String", # Identifier. Name of the resource.
+  "updateTime": "A String", # Output only. Timestamp when the resource was last modified.
+}
+
+ +
+ list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists Applications in a given project and location.
+
+Args:
+  parent: string, Required. The parent location to which the resources belong. `projects/{project_id}/locations/global/securityGateways/{security_gateway_id}` (required)
+  filter: string, Optional. A filter specifying constraints of a list operation. All fields in the Application message are supported. For example, the following query will return the Application with displayName "test-application" For more information, please refer to https://google.aip.dev/160.
+  orderBy: string, Optional. Specifies the ordering of results. See [Sorting order](https://cloud.google.com/apis/design/design_patterns#sorting_order) for more information.
+  pageSize: integer, Optional. The maximum number of items to return. If not specified, a default value of 50 will be used by the service. Regardless of the page_size value, the response may include a partial list and a caller should only rely on response's next_page_token to determine if there are more instances left to be queried.
+  pageToken: string, Optional. The next_page_token value returned from a previous ListApplicationsRequest, if any.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Message for response to listing Applications.
+  "applications": [ # A list of BeyondCorp Application in the project.
+    { # A Beyondcorp Application resource information.
+      "createTime": "A String", # Output only. Timestamp when the resource was created.
+      "displayName": "A String", # Optional. An arbitrary user-provided name for the Application resource. Cannot exceed 64 characters.
+      "endpointMatchers": [ # Required. Endpoint matchers associated with an application. A combination of hostname and ports as endpoint matcher is used to match the application. Match conditions for OR logic. An array of match conditions to allow for multiple matching criteria. The rule is considered a match if one of the conditions is met. The conditions can be one of the following combinations: (Hostname), (Hostname & Ports) EXAMPLES: Hostname - ("*.abc.com"), ("xyz.abc.com") Hostname and Ports - ("abc.com" and "22"), ("abc.com" and "22,33") etc
+        { # EndpointMatcher contains the information of the endpoint that will match the application.
+          "hostname": "A String", # Required. Hostname of the application.
+          "ports": [ # Optional. Ports of the application.
+            42,
+          ],
+        },
+      ],
+      "name": "A String", # Identifier. Name of the resource.
+      "updateTime": "A String", # Output only. Timestamp when the resource was last modified.
+    },
+  ],
+  "nextPageToken": "A String", # A token to retrieve the next page of results, or empty if there are no more results in the list.
+  "unreachable": [ # A list of locations that could not be reached.
+    "A String",
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ + \ No newline at end of file diff --git a/docs/dyn/beyondcorp_v1.projects.locations.securityGateways.html b/docs/dyn/beyondcorp_v1.projects.locations.securityGateways.html new file mode 100644 index 00000000000..964107b8e26 --- /dev/null +++ b/docs/dyn/beyondcorp_v1.projects.locations.securityGateways.html @@ -0,0 +1,416 @@ + + + +

BeyondCorp API . projects . locations . securityGateways

+

Instance Methods

+

+ applications() +

+

Returns the applications Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, requestId=None, securityGatewayId=None, x__xgafv=None)

+

Creates a new SecurityGateway in a given project and location.

+

+ delete(name, requestId=None, validateOnly=None, x__xgafv=None)

+

Deletes a single SecurityGateway.

+

+ get(name, x__xgafv=None)

+

Gets details of a single SecurityGateway.

+

+ list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists SecurityGateways in a given project and location.

+

+ list_next()

+

Retrieves the next page of results.

+

+ patch(name, body=None, requestId=None, updateMask=None, x__xgafv=None)

+

Updates the parameters of a single SecurityGateway.

+

+ setPeering(securityGateway, body=None, x__xgafv=None)

+

This is a custom method to allow customers to create a peering connections between Google network and customer networks. This is enabled only for the allowlisted customers.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, requestId=None, securityGatewayId=None, x__xgafv=None) +
Creates a new SecurityGateway in a given project and location.
+
+Args:
+  parent: string, Required. The resource project name of the SecurityGateway location using the form: `projects/{project_id}/locations/{location_id}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Information about a BeyondCorp SecurityGateway resource.
+  "createTime": "A String", # Output only. Timestamp when the resource was created.
+  "displayName": "A String", # Optional. An arbitrary user-provided name for the SecurityGateway. Cannot exceed 64 characters.
+  "externalIps": [ # Output only. IP addresses that will be used for establishing connection to the endpoints.
+    "A String",
+  ],
+  "hubs": { # Optional. Map of Hubs that represents regional data path deployment with GCP region as a key.
+    "a_key": { # The Hub message contains information pertaining to the regional data path deployments.
+      "natGatewayConfig": { # Represents the NAT Gateway configuration. # Optional. NAT gateway setup to ensure enough NAT IP addresses are available to handle the traffic needed to access the applications. Allows to explicitly enable or disable the NAT in the Hub along with the total IPs allocated to handle the capacity limits.
+        "natIps": [ # Output only. List of NAT IPs that will be used for establishing connection to the endpoints.
+          "A String",
+        ],
+      },
+    },
+  },
+  "name": "A String", # Identifier. Name of the resource.
+  "state": "A String", # Output only. The operational state of the SecurityGateway.
+  "updateTime": "A String", # Output only. Timestamp when the resource was last modified.
+}
+
+  requestId: string, Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes since the first request.
+  securityGatewayId: string, Optional. User-settable SecurityGateway resource ID. * Must start with a letter. * Must contain between 4-63 characters from `/a-z-/`. * Must end with a number or letter.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ delete(name, requestId=None, validateOnly=None, x__xgafv=None) +
Deletes a single SecurityGateway.
+
+Args:
+  name: string, Required. BeyondCorp SecurityGateway name using the form: `projects/{project_id}/locations/{location_id}/securityGateways/{security_gateway_id}` (required)
+  requestId: string, Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  validateOnly: boolean, Optional. If set, validates request by executing a dry-run which would not alter the resource in any way.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets details of a single SecurityGateway.
+
+Args:
+  name: string, Required. The resource name of the SecurityGateway using the form: `projects/{project_id}/locations/{location_id}/securityGateway/{security_gateway_id}` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Information about a BeyondCorp SecurityGateway resource.
+  "createTime": "A String", # Output only. Timestamp when the resource was created.
+  "displayName": "A String", # Optional. An arbitrary user-provided name for the SecurityGateway. Cannot exceed 64 characters.
+  "externalIps": [ # Output only. IP addresses that will be used for establishing connection to the endpoints.
+    "A String",
+  ],
+  "hubs": { # Optional. Map of Hubs that represents regional data path deployment with GCP region as a key.
+    "a_key": { # The Hub message contains information pertaining to the regional data path deployments.
+      "natGatewayConfig": { # Represents the NAT Gateway configuration. # Optional. NAT gateway setup to ensure enough NAT IP addresses are available to handle the traffic needed to access the applications. Allows to explicitly enable or disable the NAT in the Hub along with the total IPs allocated to handle the capacity limits.
+        "natIps": [ # Output only. List of NAT IPs that will be used for establishing connection to the endpoints.
+          "A String",
+        ],
+      },
+    },
+  },
+  "name": "A String", # Identifier. Name of the resource.
+  "state": "A String", # Output only. The operational state of the SecurityGateway.
+  "updateTime": "A String", # Output only. Timestamp when the resource was last modified.
+}
+
+ +
+ list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists SecurityGateways in a given project and location.
+
+Args:
+  parent: string, Required. The parent location to which the resources belong. `projects/{project_id}/locations/{location_id}/` (required)
+  filter: string, Optional. A filter specifying constraints of a list operation. All fields in the SecurityGateway message are supported. For example, the following query will return the SecurityGateway with displayName "test-security-gateway" For more information, please refer to https://google.aip.dev/160.
+  orderBy: string, Optional. Specifies the ordering of results. See [Sorting order](https://cloud.google.com/apis/design/design_patterns#sorting_order) for more information.
+  pageSize: integer, Optional. The maximum number of items to return. If not specified, a default value of 50 will be used by the service. Regardless of the page_size value, the response may include a partial list and a caller should only rely on response's next_page_token to determine if there are more instances left to be queried.
+  pageToken: string, Optional. The next_page_token value returned from a previous ListSecurityGatewayRequest, if any.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Message for response to listing SecurityGateways.
+  "nextPageToken": "A String", # A token to retrieve the next page of results, or empty if there are no more results in the list.
+  "securityGateways": [ # A list of BeyondCorp SecurityGateway in the project.
+    { # Information about a BeyondCorp SecurityGateway resource.
+      "createTime": "A String", # Output only. Timestamp when the resource was created.
+      "displayName": "A String", # Optional. An arbitrary user-provided name for the SecurityGateway. Cannot exceed 64 characters.
+      "externalIps": [ # Output only. IP addresses that will be used for establishing connection to the endpoints.
+        "A String",
+      ],
+      "hubs": { # Optional. Map of Hubs that represents regional data path deployment with GCP region as a key.
+        "a_key": { # The Hub message contains information pertaining to the regional data path deployments.
+          "natGatewayConfig": { # Represents the NAT Gateway configuration. # Optional. NAT gateway setup to ensure enough NAT IP addresses are available to handle the traffic needed to access the applications. Allows to explicitly enable or disable the NAT in the Hub along with the total IPs allocated to handle the capacity limits.
+            "natIps": [ # Output only. List of NAT IPs that will be used for establishing connection to the endpoints.
+              "A String",
+            ],
+          },
+        },
+      },
+      "name": "A String", # Identifier. Name of the resource.
+      "state": "A String", # Output only. The operational state of the SecurityGateway.
+      "updateTime": "A String", # Output only. Timestamp when the resource was last modified.
+    },
+  ],
+  "unreachable": [ # A list of locations that could not be reached.
+    "A String",
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ patch(name, body=None, requestId=None, updateMask=None, x__xgafv=None) +
Updates the parameters of a single SecurityGateway.
+
+Args:
+  name: string, Identifier. Name of the resource. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Information about a BeyondCorp SecurityGateway resource.
+  "createTime": "A String", # Output only. Timestamp when the resource was created.
+  "displayName": "A String", # Optional. An arbitrary user-provided name for the SecurityGateway. Cannot exceed 64 characters.
+  "externalIps": [ # Output only. IP addresses that will be used for establishing connection to the endpoints.
+    "A String",
+  ],
+  "hubs": { # Optional. Map of Hubs that represents regional data path deployment with GCP region as a key.
+    "a_key": { # The Hub message contains information pertaining to the regional data path deployments.
+      "natGatewayConfig": { # Represents the NAT Gateway configuration. # Optional. NAT gateway setup to ensure enough NAT IP addresses are available to handle the traffic needed to access the applications. Allows to explicitly enable or disable the NAT in the Hub along with the total IPs allocated to handle the capacity limits.
+        "natIps": [ # Output only. List of NAT IPs that will be used for establishing connection to the endpoints.
+          "A String",
+        ],
+      },
+    },
+  },
+  "name": "A String", # Identifier. Name of the resource.
+  "state": "A String", # Output only. The operational state of the SecurityGateway.
+  "updateTime": "A String", # Output only. Timestamp when the resource was last modified.
+}
+
+  requestId: string, Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request timed out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  updateMask: string, Required. Mutable fields include: display_name, hubs.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ setPeering(securityGateway, body=None, x__xgafv=None) +
This is a custom method to allow customers to create peering connections between Google network and customer networks. This is enabled only for the allowlisted customers.
+
+Args:
+  securityGateway: string, Required. BeyondCorp SecurityGateway name using the form: `projects/{project}/locations/{location}/securityGateways/{security_gateway}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Set Peering request for creating a VPC peering between Google network and customer networks.
+  "requestId": "A String", # Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes since the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  "validateOnly": True or False, # Optional. If set, validates request by executing a dry-run which would not alter the resource in any way.
+  "vpcPeerings": [ # Required. List of Peering connection information.
+    { # VPC Peering details.
+      "dnsZones": [ # Optional. List of DNS zones for DNS peering with the customer VPC network.
+        "A String",
+      ],
+      "targetVpcNetwork": "A String", # Required. The name of the target VPC network in the format: `projects/{project}/global/networks/{network}`
+    },
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/beyondcorp_v1alpha.projects.locations.html b/docs/dyn/beyondcorp_v1alpha.projects.locations.html index 2231440d8f8..77a6bb9cad2 100644 --- a/docs/dyn/beyondcorp_v1alpha.projects.locations.html +++ b/docs/dyn/beyondcorp_v1alpha.projects.locations.html @@ -129,11 +129,6 @@

Instance Methods

Returns the insights Resource.

-

- netConnections() -

-

Returns the netConnections Resource.

-

operations()

diff --git a/docs/dyn/bigquery_v2.jobs.html b/docs/dyn/bigquery_v2.jobs.html index 8a7b75217d9..82756ca4ae5 100644 --- a/docs/dyn/bigquery_v2.jobs.html +++ b/docs/dyn/bigquery_v2.jobs.html @@ -1033,12 +1033,16 @@

Method Details

"colsampleBylevel": 3.14, # Subsample ratio of columns for each level for boosted tree models. "colsampleBynode": 3.14, # Subsample ratio of columns for each node(split) for boosted tree models. "colsampleBytree": 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models. + "contributionMetric": "A String", # The contribution metric. Applies to contribution analysis models. Allowed formats supported are for summable and summable ratio contribution metrics. These include expressions such as "SUM(x)" or "SUM(x)/SUM(y)", where x and y are column names from the base table. "dartNormalizeType": "A String", # Type of normalization algorithm for boosted tree models using dart booster. "dataFrequency": "A String", # The data frequency of a time series. "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2. "dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM. "decomposeTimeSeries": True or False, # If true, perform decompose time series and save the results. + "dimensionIdColumns": [ # Optional. Names of the columns to slice on. Applies to contribution analysis models. + "A String", + ], "distanceType": "A String", # Distance type for clustering models. 
"dropout": 3.14, # Dropout probability for dnn models. "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms. @@ -1063,6 +1067,7 @@

Method Details

], "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. + "isTestColumn": "A String", # Name of the column used to determine the rows corresponding to control and test. Applies to contribution analysis models. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. "kmeansInitializationMethod": "A String", # The method used to initialize the centroids for kmeans algorithm. @@ -1079,6 +1084,7 @@

Method Details

"maxParallelTrials": "A String", # Maximum number of trials to run in parallel. "maxTimeSeriesLength": "A String", # The maximum number of time points in a time series that can be used in modeling the trend component of the time series. Don't use this option with the `timeSeriesLengthFraction` or `minTimeSeriesLength` options. "maxTreeDepth": "A String", # Maximum depth of a tree for boosted tree models. + "minAprioriSupport": 3.14, # The apriori support minimum. Applies to contribution analysis models. "minRelativeProgress": 3.14, # When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms. "minSplitLoss": 3.14, # Minimum split loss for boosted tree models. "minTimeSeriesLength": "A String", # The minimum number of time points in a time series that are used in modeling the trend component of the time series. If you use this option you must also set the `timeSeriesLengthFraction` option. This training option ensures that enough time points are available when you use `timeSeriesLengthFraction` in trend modeling. This is particularly important when forecasting multiple time series in a single query using `timeSeriesIdColumn`. If the total number of time points is less than the `minTimeSeriesLength` value, then the query uses all available time points. @@ -2447,12 +2453,16 @@

Method Details

"colsampleBylevel": 3.14, # Subsample ratio of columns for each level for boosted tree models. "colsampleBynode": 3.14, # Subsample ratio of columns for each node(split) for boosted tree models. "colsampleBytree": 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models. + "contributionMetric": "A String", # The contribution metric. Applies to contribution analysis models. Allowed formats supported are for summable and summable ratio contribution metrics. These include expressions such as "SUM(x)" or "SUM(x)/SUM(y)", where x and y are column names from the base table. "dartNormalizeType": "A String", # Type of normalization algorithm for boosted tree models using dart booster. "dataFrequency": "A String", # The data frequency of a time series. "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2. "dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM. "decomposeTimeSeries": True or False, # If true, perform decompose time series and save the results. + "dimensionIdColumns": [ # Optional. Names of the columns to slice on. Applies to contribution analysis models. + "A String", + ], "distanceType": "A String", # Distance type for clustering models. 
"dropout": 3.14, # Dropout probability for dnn models. "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms. @@ -2477,6 +2487,7 @@

Method Details

], "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. + "isTestColumn": "A String", # Name of the column used to determine the rows corresponding to control and test. Applies to contribution analysis models. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. "kmeansInitializationMethod": "A String", # The method used to initialize the centroids for kmeans algorithm. @@ -2493,6 +2504,7 @@

Method Details

"maxParallelTrials": "A String", # Maximum number of trials to run in parallel. "maxTimeSeriesLength": "A String", # The maximum number of time points in a time series that can be used in modeling the trend component of the time series. Don't use this option with the `timeSeriesLengthFraction` or `minTimeSeriesLength` options. "maxTreeDepth": "A String", # Maximum depth of a tree for boosted tree models. + "minAprioriSupport": 3.14, # The apriori support minimum. Applies to contribution analysis models. "minRelativeProgress": 3.14, # When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms. "minSplitLoss": 3.14, # Minimum split loss for boosted tree models. "minTimeSeriesLength": "A String", # The minimum number of time points in a time series that are used in modeling the trend component of the time series. If you use this option you must also set the `timeSeriesLengthFraction` option. This training option ensures that enough time points are available when you use `timeSeriesLengthFraction` in trend modeling. This is particularly important when forecasting multiple time series in a single query using `timeSeriesIdColumn`. If the total number of time points is less than the `minTimeSeriesLength` value, then the query uses all available time points. @@ -3942,12 +3954,16 @@

Method Details

"colsampleBylevel": 3.14, # Subsample ratio of columns for each level for boosted tree models. "colsampleBynode": 3.14, # Subsample ratio of columns for each node(split) for boosted tree models. "colsampleBytree": 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models. + "contributionMetric": "A String", # The contribution metric. Applies to contribution analysis models. Allowed formats supported are for summable and summable ratio contribution metrics. These include expressions such as "SUM(x)" or "SUM(x)/SUM(y)", where x and y are column names from the base table. "dartNormalizeType": "A String", # Type of normalization algorithm for boosted tree models using dart booster. "dataFrequency": "A String", # The data frequency of a time series. "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2. "dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM. "decomposeTimeSeries": True or False, # If true, perform decompose time series and save the results. + "dimensionIdColumns": [ # Optional. Names of the columns to slice on. Applies to contribution analysis models. + "A String", + ], "distanceType": "A String", # Distance type for clustering models. 
"dropout": 3.14, # Dropout probability for dnn models. "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms. @@ -3972,6 +3988,7 @@

Method Details

], "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. + "isTestColumn": "A String", # Name of the column used to determine the rows corresponding to control and test. Applies to contribution analysis models. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. "kmeansInitializationMethod": "A String", # The method used to initialize the centroids for kmeans algorithm. @@ -3988,6 +4005,7 @@

Method Details

"maxParallelTrials": "A String", # Maximum number of trials to run in parallel. "maxTimeSeriesLength": "A String", # The maximum number of time points in a time series that can be used in modeling the trend component of the time series. Don't use this option with the `timeSeriesLengthFraction` or `minTimeSeriesLength` options. "maxTreeDepth": "A String", # Maximum depth of a tree for boosted tree models. + "minAprioriSupport": 3.14, # The apriori support minimum. Applies to contribution analysis models. "minRelativeProgress": 3.14, # When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms. "minSplitLoss": 3.14, # Minimum split loss for boosted tree models. "minTimeSeriesLength": "A String", # The minimum number of time points in a time series that are used in modeling the trend component of the time series. If you use this option you must also set the `timeSeriesLengthFraction` option. This training option ensures that enough time points are available when you use `timeSeriesLengthFraction` in trend modeling. This is particularly important when forecasting multiple time series in a single query using `timeSeriesIdColumn`. If the total number of time points is less than the `minTimeSeriesLength` value, then the query uses all available time points. @@ -5327,12 +5345,16 @@

Method Details

"colsampleBylevel": 3.14, # Subsample ratio of columns for each level for boosted tree models. "colsampleBynode": 3.14, # Subsample ratio of columns for each node(split) for boosted tree models. "colsampleBytree": 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models. + "contributionMetric": "A String", # The contribution metric. Applies to contribution analysis models. Allowed formats supported are for summable and summable ratio contribution metrics. These include expressions such as "SUM(x)" or "SUM(x)/SUM(y)", where x and y are column names from the base table. "dartNormalizeType": "A String", # Type of normalization algorithm for boosted tree models using dart booster. "dataFrequency": "A String", # The data frequency of a time series. "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2. "dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM. "decomposeTimeSeries": True or False, # If true, perform decompose time series and save the results. + "dimensionIdColumns": [ # Optional. Names of the columns to slice on. Applies to contribution analysis models. + "A String", + ], "distanceType": "A String", # Distance type for clustering models. 
"dropout": 3.14, # Dropout probability for dnn models. "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms. @@ -5357,6 +5379,7 @@

Method Details

], "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. + "isTestColumn": "A String", # Name of the column used to determine the rows corresponding to control and test. Applies to contribution analysis models. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. "kmeansInitializationMethod": "A String", # The method used to initialize the centroids for kmeans algorithm. @@ -5373,6 +5396,7 @@

Method Details

"maxParallelTrials": "A String", # Maximum number of trials to run in parallel. "maxTimeSeriesLength": "A String", # The maximum number of time points in a time series that can be used in modeling the trend component of the time series. Don't use this option with the `timeSeriesLengthFraction` or `minTimeSeriesLength` options. "maxTreeDepth": "A String", # Maximum depth of a tree for boosted tree models. + "minAprioriSupport": 3.14, # The apriori support minimum. Applies to contribution analysis models. "minRelativeProgress": 3.14, # When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms. "minSplitLoss": 3.14, # Minimum split loss for boosted tree models. "minTimeSeriesLength": "A String", # The minimum number of time points in a time series that are used in modeling the trend component of the time series. If you use this option you must also set the `timeSeriesLengthFraction` option. This training option ensures that enough time points are available when you use `timeSeriesLengthFraction` in trend modeling. This is particularly important when forecasting multiple time series in a single query using `timeSeriesIdColumn`. If the total number of time points is less than the `minTimeSeriesLength` value, then the query uses all available time points. @@ -6737,12 +6761,16 @@

Method Details

"colsampleBylevel": 3.14, # Subsample ratio of columns for each level for boosted tree models. "colsampleBynode": 3.14, # Subsample ratio of columns for each node(split) for boosted tree models. "colsampleBytree": 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models. + "contributionMetric": "A String", # The contribution metric. Applies to contribution analysis models. Allowed formats supported are for summable and summable ratio contribution metrics. These include expressions such as "SUM(x)" or "SUM(x)/SUM(y)", where x and y are column names from the base table. "dartNormalizeType": "A String", # Type of normalization algorithm for boosted tree models using dart booster. "dataFrequency": "A String", # The data frequency of a time series. "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2. "dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM. "decomposeTimeSeries": True or False, # If true, perform decompose time series and save the results. + "dimensionIdColumns": [ # Optional. Names of the columns to slice on. Applies to contribution analysis models. + "A String", + ], "distanceType": "A String", # Distance type for clustering models. 
"dropout": 3.14, # Dropout probability for dnn models. "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms. @@ -6767,6 +6795,7 @@

Method Details

], "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. + "isTestColumn": "A String", # Name of the column used to determine the rows corresponding to control and test. Applies to contribution analysis models. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. "kmeansInitializationMethod": "A String", # The method used to initialize the centroids for kmeans algorithm. @@ -6783,6 +6812,7 @@

Method Details

"maxParallelTrials": "A String", # Maximum number of trials to run in parallel. "maxTimeSeriesLength": "A String", # The maximum number of time points in a time series that can be used in modeling the trend component of the time series. Don't use this option with the `timeSeriesLengthFraction` or `minTimeSeriesLength` options. "maxTreeDepth": "A String", # Maximum depth of a tree for boosted tree models. + "minAprioriSupport": 3.14, # The apriori support minimum. Applies to contribution analysis models. "minRelativeProgress": 3.14, # When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms. "minSplitLoss": 3.14, # Minimum split loss for boosted tree models. "minTimeSeriesLength": "A String", # The minimum number of time points in a time series that are used in modeling the trend component of the time series. If you use this option you must also set the `timeSeriesLengthFraction` option. This training option ensures that enough time points are available when you use `timeSeriesLengthFraction` in trend modeling. This is particularly important when forecasting multiple time series in a single query using `timeSeriesIdColumn`. If the total number of time points is less than the `minTimeSeriesLength` value, then the query uses all available time points. diff --git a/docs/dyn/bigquery_v2.models.html b/docs/dyn/bigquery_v2.models.html index 989e4660da6..f94e4c431e9 100644 --- a/docs/dyn/bigquery_v2.models.html +++ b/docs/dyn/bigquery_v2.models.html @@ -671,12 +671,16 @@

Method Details

"colsampleBylevel": 3.14, # Subsample ratio of columns for each level for boosted tree models. "colsampleBynode": 3.14, # Subsample ratio of columns for each node(split) for boosted tree models. "colsampleBytree": 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models. + "contributionMetric": "A String", # The contribution metric. Applies to contribution analysis models. Allowed formats supported are for summable and summable ratio contribution metrics. These include expressions such as "SUM(x)" or "SUM(x)/SUM(y)", where x and y are column names from the base table. "dartNormalizeType": "A String", # Type of normalization algorithm for boosted tree models using dart booster. "dataFrequency": "A String", # The data frequency of a time series. "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2. "dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM. "decomposeTimeSeries": True or False, # If true, perform decompose time series and save the results. + "dimensionIdColumns": [ # Optional. Names of the columns to slice on. Applies to contribution analysis models. + "A String", + ], "distanceType": "A String", # Distance type for clustering models. 
"dropout": 3.14, # Dropout probability for dnn models. "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms. @@ -701,6 +705,7 @@

Method Details

], "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. + "isTestColumn": "A String", # Name of the column used to determine the rows corresponding to control and test. Applies to contribution analysis models. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. "kmeansInitializationMethod": "A String", # The method used to initialize the centroids for kmeans algorithm. @@ -717,6 +722,7 @@

Method Details

"maxParallelTrials": "A String", # Maximum number of trials to run in parallel. "maxTimeSeriesLength": "A String", # The maximum number of time points in a time series that can be used in modeling the trend component of the time series. Don't use this option with the `timeSeriesLengthFraction` or `minTimeSeriesLength` options. "maxTreeDepth": "A String", # Maximum depth of a tree for boosted tree models. + "minAprioriSupport": 3.14, # The apriori support minimum. Applies to contribution analysis models. "minRelativeProgress": 3.14, # When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms. "minSplitLoss": 3.14, # Minimum split loss for boosted tree models. "minTimeSeriesLength": "A String", # The minimum number of time points in a time series that are used in modeling the trend component of the time series. If you use this option you must also set the `timeSeriesLengthFraction` option. This training option ensures that enough time points are available when you use `timeSeriesLengthFraction` in trend modeling. This is particularly important when forecasting multiple time series in a single query using `timeSeriesIdColumn`. If the total number of time points is less than the `minTimeSeriesLength` value, then the query uses all available time points. @@ -1059,12 +1065,16 @@

Method Details

"colsampleBylevel": 3.14, # Subsample ratio of columns for each level for boosted tree models. "colsampleBynode": 3.14, # Subsample ratio of columns for each node(split) for boosted tree models. "colsampleBytree": 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models. + "contributionMetric": "A String", # The contribution metric. Applies to contribution analysis models. Allowed formats supported are for summable and summable ratio contribution metrics. These include expressions such as "SUM(x)" or "SUM(x)/SUM(y)", where x and y are column names from the base table. "dartNormalizeType": "A String", # Type of normalization algorithm for boosted tree models using dart booster. "dataFrequency": "A String", # The data frequency of a time series. "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2. "dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM. "decomposeTimeSeries": True or False, # If true, perform decompose time series and save the results. + "dimensionIdColumns": [ # Optional. Names of the columns to slice on. Applies to contribution analysis models. + "A String", + ], "distanceType": "A String", # Distance type for clustering models. 
"dropout": 3.14, # Dropout probability for dnn models. "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms. @@ -1089,6 +1099,7 @@

Method Details

], "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. + "isTestColumn": "A String", # Name of the column used to determine the rows corresponding to control and test. Applies to contribution analysis models. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. "kmeansInitializationMethod": "A String", # The method used to initialize the centroids for kmeans algorithm. @@ -1105,6 +1116,7 @@

Method Details

"maxParallelTrials": "A String", # Maximum number of trials to run in parallel. "maxTimeSeriesLength": "A String", # The maximum number of time points in a time series that can be used in modeling the trend component of the time series. Don't use this option with the `timeSeriesLengthFraction` or `minTimeSeriesLength` options. "maxTreeDepth": "A String", # Maximum depth of a tree for boosted tree models. + "minAprioriSupport": 3.14, # The apriori support minimum. Applies to contribution analysis models. "minRelativeProgress": 3.14, # When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms. "minSplitLoss": 3.14, # Minimum split loss for boosted tree models. "minTimeSeriesLength": "A String", # The minimum number of time points in a time series that are used in modeling the trend component of the time series. If you use this option you must also set the `timeSeriesLengthFraction` option. This training option ensures that enough time points are available when you use `timeSeriesLengthFraction` in trend modeling. This is particularly important when forecasting multiple time series in a single query using `timeSeriesIdColumn`. If the total number of time points is less than the `minTimeSeriesLength` value, then the query uses all available time points. @@ -1735,12 +1747,16 @@

Method Details

"colsampleBylevel": 3.14, # Subsample ratio of columns for each level for boosted tree models. "colsampleBynode": 3.14, # Subsample ratio of columns for each node(split) for boosted tree models. "colsampleBytree": 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models. + "contributionMetric": "A String", # The contribution metric. Applies to contribution analysis models. Allowed formats supported are for summable and summable ratio contribution metrics. These include expressions such as "SUM(x)" or "SUM(x)/SUM(y)", where x and y are column names from the base table. "dartNormalizeType": "A String", # Type of normalization algorithm for boosted tree models using dart booster. "dataFrequency": "A String", # The data frequency of a time series. "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2. "dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM. "decomposeTimeSeries": True or False, # If true, perform decompose time series and save the results. + "dimensionIdColumns": [ # Optional. Names of the columns to slice on. Applies to contribution analysis models. + "A String", + ], "distanceType": "A String", # Distance type for clustering models. 
"dropout": 3.14, # Dropout probability for dnn models. "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms. @@ -1765,6 +1781,7 @@

Method Details

], "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. + "isTestColumn": "A String", # Name of the column used to determine the rows corresponding to control and test. Applies to contribution analysis models. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. "kmeansInitializationMethod": "A String", # The method used to initialize the centroids for kmeans algorithm. @@ -1781,6 +1798,7 @@

Method Details

"maxParallelTrials": "A String", # Maximum number of trials to run in parallel. "maxTimeSeriesLength": "A String", # The maximum number of time points in a time series that can be used in modeling the trend component of the time series. Don't use this option with the `timeSeriesLengthFraction` or `minTimeSeriesLength` options. "maxTreeDepth": "A String", # Maximum depth of a tree for boosted tree models. + "minAprioriSupport": 3.14, # The apriori support minimum. Applies to contribution analysis models. "minRelativeProgress": 3.14, # When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms. "minSplitLoss": 3.14, # Minimum split loss for boosted tree models. "minTimeSeriesLength": "A String", # The minimum number of time points in a time series that are used in modeling the trend component of the time series. If you use this option you must also set the `timeSeriesLengthFraction` option. This training option ensures that enough time points are available when you use `timeSeriesLengthFraction` in trend modeling. This is particularly important when forecasting multiple time series in a single query using `timeSeriesIdColumn`. If the total number of time points is less than the `minTimeSeriesLength` value, then the query uses all available time points. @@ -2123,12 +2141,16 @@

Method Details

"colsampleBylevel": 3.14, # Subsample ratio of columns for each level for boosted tree models. "colsampleBynode": 3.14, # Subsample ratio of columns for each node(split) for boosted tree models. "colsampleBytree": 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models. + "contributionMetric": "A String", # The contribution metric. Applies to contribution analysis models. Allowed formats supported are for summable and summable ratio contribution metrics. These include expressions such as "SUM(x)" or "SUM(x)/SUM(y)", where x and y are column names from the base table. "dartNormalizeType": "A String", # Type of normalization algorithm for boosted tree models using dart booster. "dataFrequency": "A String", # The data frequency of a time series. "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2. "dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM. "decomposeTimeSeries": True or False, # If true, perform decompose time series and save the results. + "dimensionIdColumns": [ # Optional. Names of the columns to slice on. Applies to contribution analysis models. + "A String", + ], "distanceType": "A String", # Distance type for clustering models. 
"dropout": 3.14, # Dropout probability for dnn models. "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms. @@ -2153,6 +2175,7 @@

Method Details

], "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. + "isTestColumn": "A String", # Name of the column used to determine the rows corresponding to control and test. Applies to contribution analysis models. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. "kmeansInitializationMethod": "A String", # The method used to initialize the centroids for kmeans algorithm. @@ -2169,6 +2192,7 @@

Method Details

"maxParallelTrials": "A String", # Maximum number of trials to run in parallel. "maxTimeSeriesLength": "A String", # The maximum number of time points in a time series that can be used in modeling the trend component of the time series. Don't use this option with the `timeSeriesLengthFraction` or `minTimeSeriesLength` options. "maxTreeDepth": "A String", # Maximum depth of a tree for boosted tree models. + "minAprioriSupport": 3.14, # The apriori support minimum. Applies to contribution analysis models. "minRelativeProgress": 3.14, # When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms. "minSplitLoss": 3.14, # Minimum split loss for boosted tree models. "minTimeSeriesLength": "A String", # The minimum number of time points in a time series that are used in modeling the trend component of the time series. If you use this option you must also set the `timeSeriesLengthFraction` option. This training option ensures that enough time points are available when you use `timeSeriesLengthFraction` in trend modeling. This is particularly important when forecasting multiple time series in a single query using `timeSeriesIdColumn`. If the total number of time points is less than the `minTimeSeriesLength` value, then the query uses all available time points. @@ -2808,12 +2832,16 @@

Method Details

"colsampleBylevel": 3.14, # Subsample ratio of columns for each level for boosted tree models. "colsampleBynode": 3.14, # Subsample ratio of columns for each node(split) for boosted tree models. "colsampleBytree": 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models. + "contributionMetric": "A String", # The contribution metric. Applies to contribution analysis models. Allowed formats supported are for summable and summable ratio contribution metrics. These include expressions such as "SUM(x)" or "SUM(x)/SUM(y)", where x and y are column names from the base table. "dartNormalizeType": "A String", # Type of normalization algorithm for boosted tree models using dart booster. "dataFrequency": "A String", # The data frequency of a time series. "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2. "dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM. "decomposeTimeSeries": True or False, # If true, perform decompose time series and save the results. + "dimensionIdColumns": [ # Optional. Names of the columns to slice on. Applies to contribution analysis models. + "A String", + ], "distanceType": "A String", # Distance type for clustering models. 
"dropout": 3.14, # Dropout probability for dnn models. "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms. @@ -2838,6 +2866,7 @@

Method Details

], "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. + "isTestColumn": "A String", # Name of the column used to determine the rows corresponding to control and test. Applies to contribution analysis models. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. "kmeansInitializationMethod": "A String", # The method used to initialize the centroids for kmeans algorithm. @@ -2854,6 +2883,7 @@

Method Details

"maxParallelTrials": "A String", # Maximum number of trials to run in parallel. "maxTimeSeriesLength": "A String", # The maximum number of time points in a time series that can be used in modeling the trend component of the time series. Don't use this option with the `timeSeriesLengthFraction` or `minTimeSeriesLength` options. "maxTreeDepth": "A String", # Maximum depth of a tree for boosted tree models. + "minAprioriSupport": 3.14, # The apriori support minimum. Applies to contribution analysis models. "minRelativeProgress": 3.14, # When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms. "minSplitLoss": 3.14, # Minimum split loss for boosted tree models. "minTimeSeriesLength": "A String", # The minimum number of time points in a time series that are used in modeling the trend component of the time series. If you use this option you must also set the `timeSeriesLengthFraction` option. This training option ensures that enough time points are available when you use `timeSeriesLengthFraction` in trend modeling. This is particularly important when forecasting multiple time series in a single query using `timeSeriesIdColumn`. If the total number of time points is less than the `minTimeSeriesLength` value, then the query uses all available time points. @@ -3196,12 +3226,16 @@

Method Details

"colsampleBylevel": 3.14, # Subsample ratio of columns for each level for boosted tree models. "colsampleBynode": 3.14, # Subsample ratio of columns for each node(split) for boosted tree models. "colsampleBytree": 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models. + "contributionMetric": "A String", # The contribution metric. Applies to contribution analysis models. Allowed formats supported are for summable and summable ratio contribution metrics. These include expressions such as "SUM(x)" or "SUM(x)/SUM(y)", where x and y are column names from the base table. "dartNormalizeType": "A String", # Type of normalization algorithm for boosted tree models using dart booster. "dataFrequency": "A String", # The data frequency of a time series. "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2. "dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM. "decomposeTimeSeries": True or False, # If true, perform decompose time series and save the results. + "dimensionIdColumns": [ # Optional. Names of the columns to slice on. Applies to contribution analysis models. + "A String", + ], "distanceType": "A String", # Distance type for clustering models. 
"dropout": 3.14, # Dropout probability for dnn models. "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms. @@ -3226,6 +3260,7 @@

Method Details

], "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. + "isTestColumn": "A String", # Name of the column used to determine the rows corresponding to control and test. Applies to contribution analysis models. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. "kmeansInitializationMethod": "A String", # The method used to initialize the centroids for kmeans algorithm. @@ -3242,6 +3277,7 @@

Method Details

"maxParallelTrials": "A String", # Maximum number of trials to run in parallel. "maxTimeSeriesLength": "A String", # The maximum number of time points in a time series that can be used in modeling the trend component of the time series. Don't use this option with the `timeSeriesLengthFraction` or `minTimeSeriesLength` options. "maxTreeDepth": "A String", # Maximum depth of a tree for boosted tree models. + "minAprioriSupport": 3.14, # The apriori support minimum. Applies to contribution analysis models. "minRelativeProgress": 3.14, # When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms. "minSplitLoss": 3.14, # Minimum split loss for boosted tree models. "minTimeSeriesLength": "A String", # The minimum number of time points in a time series that are used in modeling the trend component of the time series. If you use this option you must also set the `timeSeriesLengthFraction` option. This training option ensures that enough time points are available when you use `timeSeriesLengthFraction` in trend modeling. This is particularly important when forecasting multiple time series in a single query using `timeSeriesIdColumn`. If the total number of time points is less than the `minTimeSeriesLength` value, then the query uses all available time points. @@ -3860,12 +3896,16 @@

Method Details

"colsampleBylevel": 3.14, # Subsample ratio of columns for each level for boosted tree models. "colsampleBynode": 3.14, # Subsample ratio of columns for each node(split) for boosted tree models. "colsampleBytree": 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models. + "contributionMetric": "A String", # The contribution metric. Applies to contribution analysis models. Allowed formats supported are for summable and summable ratio contribution metrics. These include expressions such as "SUM(x)" or "SUM(x)/SUM(y)", where x and y are column names from the base table. "dartNormalizeType": "A String", # Type of normalization algorithm for boosted tree models using dart booster. "dataFrequency": "A String", # The data frequency of a time series. "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2. "dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM. "decomposeTimeSeries": True or False, # If true, perform decompose time series and save the results. + "dimensionIdColumns": [ # Optional. Names of the columns to slice on. Applies to contribution analysis models. + "A String", + ], "distanceType": "A String", # Distance type for clustering models. 
"dropout": 3.14, # Dropout probability for dnn models. "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms. @@ -3890,6 +3930,7 @@

Method Details

], "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. + "isTestColumn": "A String", # Name of the column used to determine the rows corresponding to control and test. Applies to contribution analysis models. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. "kmeansInitializationMethod": "A String", # The method used to initialize the centroids for kmeans algorithm. @@ -3906,6 +3947,7 @@

Method Details

"maxParallelTrials": "A String", # Maximum number of trials to run in parallel. "maxTimeSeriesLength": "A String", # The maximum number of time points in a time series that can be used in modeling the trend component of the time series. Don't use this option with the `timeSeriesLengthFraction` or `minTimeSeriesLength` options. "maxTreeDepth": "A String", # Maximum depth of a tree for boosted tree models. + "minAprioriSupport": 3.14, # The apriori support minimum. Applies to contribution analysis models. "minRelativeProgress": 3.14, # When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms. "minSplitLoss": 3.14, # Minimum split loss for boosted tree models. "minTimeSeriesLength": "A String", # The minimum number of time points in a time series that are used in modeling the trend component of the time series. If you use this option you must also set the `timeSeriesLengthFraction` option. This training option ensures that enough time points are available when you use `timeSeriesLengthFraction` in trend modeling. This is particularly important when forecasting multiple time series in a single query using `timeSeriesIdColumn`. If the total number of time points is less than the `minTimeSeriesLength` value, then the query uses all available time points. @@ -4248,12 +4290,16 @@

Method Details

"colsampleBylevel": 3.14, # Subsample ratio of columns for each level for boosted tree models. "colsampleBynode": 3.14, # Subsample ratio of columns for each node(split) for boosted tree models. "colsampleBytree": 3.14, # Subsample ratio of columns when constructing each tree for boosted tree models. + "contributionMetric": "A String", # The contribution metric. Applies to contribution analysis models. Allowed formats supported are for summable and summable ratio contribution metrics. These include expressions such as "SUM(x)" or "SUM(x)/SUM(y)", where x and y are column names from the base table. "dartNormalizeType": "A String", # Type of normalization algorithm for boosted tree models using dart booster. "dataFrequency": "A String", # The data frequency of a time series. "dataSplitColumn": "A String", # The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties "dataSplitEvalFraction": 3.14, # The fraction of evaluation data over the whole input data. The rest of data will be used as training data. The format should be double. Accurate to two decimal places. Default value is 0.2. "dataSplitMethod": "A String", # The data split type for training and evaluation, e.g. RANDOM. "decomposeTimeSeries": True or False, # If true, perform decompose time series and save the results. + "dimensionIdColumns": [ # Optional. Names of the columns to slice on. Applies to contribution analysis models. + "A String", + ], "distanceType": "A String", # Distance type for clustering models. 
"dropout": 3.14, # Dropout probability for dnn models. "earlyStop": True or False, # Whether to stop early when the loss doesn't improve significantly any more (compared to min_relative_progress). Used only for iterative training algorithms. @@ -4278,6 +4324,7 @@

Method Details

], "instanceWeightColumn": "A String", # Name of the instance weight column for training data. This column isn't be used as a feature. "integratedGradientsNumSteps": "A String", # Number of integral steps for the integrated gradients explain method. + "isTestColumn": "A String", # Name of the column used to determine the rows corresponding to control and test. Applies to contribution analysis models. "itemColumn": "A String", # Item column specified for matrix factorization models. "kmeansInitializationColumn": "A String", # The column used to provide the initial centroids for kmeans algorithm when kmeans_initialization_method is CUSTOM. "kmeansInitializationMethod": "A String", # The method used to initialize the centroids for kmeans algorithm. @@ -4294,6 +4341,7 @@

Method Details

"maxParallelTrials": "A String", # Maximum number of trials to run in parallel. "maxTimeSeriesLength": "A String", # The maximum number of time points in a time series that can be used in modeling the trend component of the time series. Don't use this option with the `timeSeriesLengthFraction` or `minTimeSeriesLength` options. "maxTreeDepth": "A String", # Maximum depth of a tree for boosted tree models. + "minAprioriSupport": 3.14, # The apriori support minimum. Applies to contribution analysis models. "minRelativeProgress": 3.14, # When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms. "minSplitLoss": 3.14, # Minimum split loss for boosted tree models. "minTimeSeriesLength": "A String", # The minimum number of time points in a time series that are used in modeling the trend component of the time series. If you use this option you must also set the `timeSeriesLengthFraction` option. This training option ensures that enough time points are available when you use `timeSeriesLengthFraction` in trend modeling. This is particularly important when forecasting multiple time series in a single query using `timeSeriesIdColumn`. If the total number of time points is less than the `minTimeSeriesLength` value, then the query uses all available time points. diff --git a/docs/dyn/businessprofileperformance_v1.locations.html b/docs/dyn/businessprofileperformance_v1.locations.html index c5ffe394d0c..d9cdf94310c 100644 --- a/docs/dyn/businessprofileperformance_v1.locations.html +++ b/docs/dyn/businessprofileperformance_v1.locations.html @@ -137,10 +137,10 @@

Method Details

"dailySubEntityType": { # Represents all possible subentity types that are associated with DailyMetrics. # The DailySubEntityType that the TimeSeries represents. Will not be present when breakdown does not exist. "dayOfWeek": "A String", # Represents the day of the week. Eg: MONDAY. Currently supported DailyMetrics = NONE. "timeOfDay": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Represents the time of the day in 24 hour format. Eg: 13:34:20 Currently supported DailyMetrics = NONE. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, "timeSeries": { # Represents a timeseries. # List of datapoints where each datapoint is a date-value pair. @@ -198,10 +198,10 @@

Method Details

FRIDAY - Friday SATURDAY - Saturday SUNDAY - Sunday - dailySubEntityType_timeOfDay_hours: integer, Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - dailySubEntityType_timeOfDay_minutes: integer, Minutes of hour of day. Must be from 0 to 59. - dailySubEntityType_timeOfDay_nanos: integer, Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - dailySubEntityType_timeOfDay_seconds: integer, Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + dailySubEntityType_timeOfDay_hours: integer, Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + dailySubEntityType_timeOfDay_minutes: integer, Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + dailySubEntityType_timeOfDay_nanos: integer, Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + dailySubEntityType_timeOfDay_seconds: integer, Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format diff --git a/docs/dyn/calendar_v3.events.html b/docs/dyn/calendar_v3.events.html index b685c5a1f64..1ddc6f5ac9d 100644 --- a/docs/dyn/calendar_v3.events.html +++ b/docs/dyn/calendar_v3.events.html @@ -185,7 +185,7 @@

Method Details

# - "needsAction" - The attendee has not responded to the invitation (recommended for new events). # - "declined" - The attendee has declined the invitation. # - "tentative" - The attendee has tentatively accepted the invitation. - # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" won't see an event on their calendar unless they choose to change their invitation response in the event invitation email. + # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" or "Only if the sender is known" might have their response reset to needsAction and won't see an event in their calendar unless they change their response in the event invitation email. Furthermore, if more than 200 guests are invited to the event, response status is not propagated to the guests. "self": false, # Whether this entry represents the calendar on which this copy of the event appears. Read-only. The default is False. }, ], @@ -482,7 +482,7 @@

Method Details

# - "needsAction" - The attendee has not responded to the invitation (recommended for new events). # - "declined" - The attendee has declined the invitation. # - "tentative" - The attendee has tentatively accepted the invitation. - # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" won't see an event on their calendar unless they choose to change their invitation response in the event invitation email. + # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" or "Only if the sender is known" might have their response reset to needsAction and won't see an event in their calendar unless they change their response in the event invitation email. Furthermore, if more than 200 guests are invited to the event, response status is not propagated to the guests. "self": false, # Whether this entry represents the calendar on which this copy of the event appears. Read-only. The default is False. }, ], @@ -774,7 +774,7 @@

Method Details

# - "needsAction" - The attendee has not responded to the invitation (recommended for new events). # - "declined" - The attendee has declined the invitation. # - "tentative" - The attendee has tentatively accepted the invitation. - # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" won't see an event on their calendar unless they choose to change their invitation response in the event invitation email. + # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" or "Only if the sender is known" might have their response reset to needsAction and won't see an event in their calendar unless they change their response in the event invitation email. Furthermore, if more than 200 guests are invited to the event, response status is not propagated to the guests. "self": false, # Whether this entry represents the calendar on which this copy of the event appears. Read-only. The default is False. }, ], @@ -1070,7 +1070,7 @@

Method Details

# - "needsAction" - The attendee has not responded to the invitation (recommended for new events). # - "declined" - The attendee has declined the invitation. # - "tentative" - The attendee has tentatively accepted the invitation. - # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" won't see an event on their calendar unless they choose to change their invitation response in the event invitation email. + # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" or "Only if the sender is known" might have their response reset to needsAction and won't see an event in their calendar unless they change their response in the event invitation email. Furthermore, if more than 200 guests are invited to the event, response status is not propagated to the guests. "self": false, # Whether this entry represents the calendar on which this copy of the event appears. Read-only. The default is False. }, ], @@ -1371,7 +1371,7 @@

Method Details

# - "needsAction" - The attendee has not responded to the invitation (recommended for new events). # - "declined" - The attendee has declined the invitation. # - "tentative" - The attendee has tentatively accepted the invitation. - # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" won't see an event on their calendar unless they choose to change their invitation response in the event invitation email. + # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" or "Only if the sender is known" might have their response reset to needsAction and won't see an event in their calendar unless they change their response in the event invitation email. Furthermore, if more than 200 guests are invited to the event, response status is not propagated to the guests. "self": false, # Whether this entry represents the calendar on which this copy of the event appears. Read-only. The default is False. }, ], @@ -1698,7 +1698,7 @@

Method Details

# - "needsAction" - The attendee has not responded to the invitation (recommended for new events). # - "declined" - The attendee has declined the invitation. # - "tentative" - The attendee has tentatively accepted the invitation. - # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" won't see an event on their calendar unless they choose to change their invitation response in the event invitation email. + # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" or "Only if the sender is known" might have their response reset to needsAction and won't see an event in their calendar unless they change their response in the event invitation email. Furthermore, if more than 200 guests are invited to the event, response status is not propagated to the guests. "self": false, # Whether this entry represents the calendar on which this copy of the event appears. Read-only. The default is False. }, ], @@ -2091,7 +2091,7 @@

Method Details

# - "needsAction" - The attendee has not responded to the invitation (recommended for new events). # - "declined" - The attendee has declined the invitation. # - "tentative" - The attendee has tentatively accepted the invitation. - # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" won't see an event on their calendar unless they choose to change their invitation response in the event invitation email. + # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" or "Only if the sender is known" might have their response reset to needsAction and won't see an event in their calendar unless they change their response in the event invitation email. Furthermore, if more than 200 guests are invited to the event, response status is not propagated to the guests. "self": false, # Whether this entry represents the calendar on which this copy of the event appears. Read-only. The default is False. }, ], @@ -2420,7 +2420,7 @@

Method Details

# - "needsAction" - The attendee has not responded to the invitation (recommended for new events). # - "declined" - The attendee has declined the invitation. # - "tentative" - The attendee has tentatively accepted the invitation. - # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" won't see an event on their calendar unless they choose to change their invitation response in the event invitation email. + # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" or "Only if the sender is known" might have their response reset to needsAction and won't see an event in their calendar unless they change their response in the event invitation email. Furthermore, if more than 200 guests are invited to the event, response status is not propagated to the guests. "self": false, # Whether this entry represents the calendar on which this copy of the event appears. Read-only. The default is False. }, ], @@ -2717,7 +2717,7 @@

Method Details

# - "needsAction" - The attendee has not responded to the invitation (recommended for new events). # - "declined" - The attendee has declined the invitation. # - "tentative" - The attendee has tentatively accepted the invitation. - # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" won't see an event on their calendar unless they choose to change their invitation response in the event invitation email. + # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" or "Only if the sender is known" might have their response reset to needsAction and won't see an event in their calendar unless they change their response in the event invitation email. Furthermore, if more than 200 guests are invited to the event, response status is not propagated to the guests. "self": false, # Whether this entry represents the calendar on which this copy of the event appears. Read-only. The default is False. }, ], @@ -3019,7 +3019,7 @@

Method Details

# - "needsAction" - The attendee has not responded to the invitation (recommended for new events). # - "declined" - The attendee has declined the invitation. # - "tentative" - The attendee has tentatively accepted the invitation. - # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" won't see an event on their calendar unless they choose to change their invitation response in the event invitation email. + # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" or "Only if the sender is known" might have their response reset to needsAction and won't see an event in their calendar unless they change their response in the event invitation email. Furthermore, if more than 200 guests are invited to the event, response status is not propagated to the guests. "self": false, # Whether this entry represents the calendar on which this copy of the event appears. Read-only. The default is False. }, ], @@ -3325,7 +3325,7 @@

Method Details

# - "needsAction" - The attendee has not responded to the invitation (recommended for new events). # - "declined" - The attendee has declined the invitation. # - "tentative" - The attendee has tentatively accepted the invitation. - # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" won't see an event on their calendar unless they choose to change their invitation response in the event invitation email. + # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" or "Only if the sender is known" might have their response reset to needsAction and won't see an event in their calendar unless they change their response in the event invitation email. Furthermore, if more than 200 guests are invited to the event, response status is not propagated to the guests. "self": false, # Whether this entry represents the calendar on which this copy of the event appears. Read-only. The default is False. }, ], @@ -3622,7 +3622,7 @@

Method Details

# - "needsAction" - The attendee has not responded to the invitation (recommended for new events). # - "declined" - The attendee has declined the invitation. # - "tentative" - The attendee has tentatively accepted the invitation. - # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" won't see an event on their calendar unless they choose to change their invitation response in the event invitation email. + # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" or "Only if the sender is known" might have their response reset to needsAction and won't see an event in their calendar unless they change their response in the event invitation email. Furthermore, if more than 200 guests are invited to the event, response status is not propagated to the guests. "self": false, # Whether this entry represents the calendar on which this copy of the event appears. Read-only. The default is False. }, ], @@ -3924,7 +3924,7 @@

Method Details

# - "needsAction" - The attendee has not responded to the invitation (recommended for new events). # - "declined" - The attendee has declined the invitation. # - "tentative" - The attendee has tentatively accepted the invitation. - # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" won't see an event on their calendar unless they choose to change their invitation response in the event invitation email. + # - "accepted" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the "Add invitations to my calendar" setting set to "When I respond to invitation in email" or "Only if the sender is known" might have their response reset to needsAction and won't see an event in their calendar unless they change their response in the event invitation email. Furthermore, if more than 200 guests are invited to the event, response status is not propagated to the guests. "self": false, # Whether this entry represents the calendar on which this copy of the event appears. Read-only. The default is False. }, ], diff --git a/docs/dyn/chat_v1.spaces.html b/docs/dyn/chat_v1.spaces.html index bcaf5787010..e70e497a684 100644 --- a/docs/dyn/chat_v1.spaces.html +++ b/docs/dyn/chat_v1.spaces.html @@ -97,25 +97,25 @@

Instance Methods

Completes the [import process](https://developers.google.com/workspace/chat/import-data) for the specified space and makes it visible to users. Requires app authentication and domain-wide delegation. For more information, see [Authorize Google Chat apps to import data](https://developers.google.com/workspace/chat/authorize-import).

create(body=None, requestId=None, x__xgafv=None)

-

Creates a space with no members. Can be used to create a named space. Spaces grouped by topics aren't supported. For an example, see [Create a space](https://developers.google.com/workspace/chat/create-spaces). If you receive the error message `ALREADY_EXISTS` when creating a space, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. If you're a member of the [Developer Preview program](https://developers.google.com/workspace/preview), you can create a group chat in import mode using `spaceType.GROUP_CHAT`. Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).

+

Creates a space with no members. Can be used to create a named space. Spaces grouped by topics aren't supported. For an example, see [Create a space](https://developers.google.com/workspace/chat/create-spaces). If you receive the error message `ALREADY_EXISTS` when creating a space, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. If you're a member of the [Developer Preview program](https://developers.google.com/workspace/preview), you can create a group chat in import mode using `spaceType.GROUP_CHAT`. Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) When authenticating as an app, the `space.customer` field must be set in the request.

delete(name, useAdminAccess=None, x__xgafv=None)

-

Deletes a named space. Always performs a cascading delete, which means that the space's child resources—like messages posted in the space and memberships in the space—are also deleted. For an example, see [Delete a space](https://developers.google.com/workspace/chat/delete-spaces). Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) from a user who has permission to delete the space.

+

Deletes a named space. Always performs a cascading delete, which means that the space's child resources—like messages posted in the space and memberships in the space—are also deleted. For an example, see [Delete a space](https://developers.google.com/workspace/chat/delete-spaces). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)

findDirectMessage(name=None, x__xgafv=None)

-

Returns the existing direct message with the specified user. If no direct message space is found, returns a `404 NOT_FOUND` error. For an example, see [Find a direct message](/chat/api/guides/v1/spaces/find-direct-message). With [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), returns the direct message space between the specified user and the authenticated user. With [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app), returns the direct message space between the specified user and the calling Chat app. Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) or [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app).

+

Returns the existing direct message with the specified user. If no direct message space is found, returns a `404 NOT_FOUND` error. For an example, see [Find a direct message](/chat/api/guides/v1/spaces/find-direct-message). With [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app), returns the direct message space between the specified user and the calling Chat app. With [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), returns the direct message space between the specified user and the authenticated user. // Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)

get(name, useAdminAccess=None, x__xgafv=None)

-

Returns details about a space. For an example, see [Get details about a space](https://developers.google.com/workspace/chat/get-spaces). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).

+

Returns details about a space. For an example, see [Get details about a space](https://developers.google.com/workspace/chat/get-spaces). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)

list(filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists spaces the caller is a member of. Group chats and DMs aren't listed until the first message is sent. For an example, see [List spaces](https://developers.google.com/workspace/chat/list-spaces). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). Lists spaces visible to the caller or authenticated user. Group chats and DMs aren't listed until the first message is sent. To list all named spaces by Google Workspace organization, use the [`spaces.search()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/search) method using Workspace administrator privileges instead.

+

Lists spaces the caller is a member of. Group chats and DMs aren't listed until the first message is sent. For an example, see [List spaces](https://developers.google.com/workspace/chat/list-spaces). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) Lists spaces visible to the caller or authenticated user. Group chats and DMs aren't listed until the first message is sent. To list all named spaces by Google Workspace organization, use the [`spaces.search()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/search) method using Workspace administrator privileges instead.

list_next()

Retrieves the next page of results.

patch(name, body=None, updateMask=None, useAdminAccess=None, x__xgafv=None)

-

Updates a space. For an example, see [Update a space](https://developers.google.com/workspace/chat/update-spaces). If you're updating the `displayName` field and receive the error message `ALREADY_EXISTS`, try a different display name.. An existing space within the Google Workspace organization might already use this display name. Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).

+

Updates a space. For an example, see [Update a space](https://developers.google.com/workspace/chat/update-spaces). If you're updating the `displayName` field and receive the error message `ALREADY_EXISTS`, try a different display name.. An existing space within the Google Workspace organization might already use this display name. Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)

search(orderBy=None, pageSize=None, pageToken=None, query=None, useAdminAccess=None, x__xgafv=None)

Returns a list of spaces in a Google Workspace organization based on an administrator's search. Requires [user authentication with administrator privileges](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user#admin-privileges). In the request, set `use_admin_access` to `true`.

@@ -155,19 +155,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # The import mode space. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. 
Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. 
"externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. 
For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. 
+ "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -185,7 +220,7 @@

Method Details

create(body=None, requestId=None, x__xgafv=None) -
Creates a space with no members. Can be used to create a named space. Spaces grouped by topics aren't supported. For an example, see [Create a space](https://developers.google.com/workspace/chat/create-spaces). If you receive the error message `ALREADY_EXISTS` when creating a space, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. If you're a member of the [Developer Preview program](https://developers.google.com/workspace/preview), you can create a group chat in import mode using `spaceType.GROUP_CHAT`. Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).
+  
Creates a space with no members. Can be used to create a named space. Spaces grouped by topics aren't supported. For an example, see [Create a space](https://developers.google.com/workspace/chat/create-spaces). If you receive the error message `ALREADY_EXISTS` when creating a space, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. If you're a member of the [Developer Preview program](https://developers.google.com/workspace/preview), you can create a group chat in import mode using `spaceType.GROUP_CHAT`. Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) When authenticating as an app, the `space.customer` field must be set in the request.
 
 Args:
   body: object, The request body.
@@ -194,19 +229,54 @@ 

Method Details

{ # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. 
Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. 
"externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. 
For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. 
+ "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -232,19 +302,54 @@

Method Details

{ # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. 
Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. 
"externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. 
For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. 
+ "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -261,7 +366,7 @@

Method Details

delete(name, useAdminAccess=None, x__xgafv=None) -
Deletes a named space. Always performs a cascading delete, which means that the space's child resources—like messages posted in the space and memberships in the space—are also deleted. For an example, see [Delete a space](https://developers.google.com/workspace/chat/delete-spaces). Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) from a user who has permission to delete the space.
+  
Deletes a named space. Always performs a cascading delete, which means that the space's child resources—like messages posted in the space and memberships in the space—are also deleted. For an example, see [Delete a space](https://developers.google.com/workspace/chat/delete-spaces). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)
 
 Args:
   name: string, Required. Resource name of the space to delete. Format: `spaces/{space}` (required)
@@ -280,7 +385,7 @@ 

Method Details

findDirectMessage(name=None, x__xgafv=None) -
Returns the existing direct message with the specified user. If no direct message space is found, returns a `404 NOT_FOUND` error. For an example, see [Find a direct message](/chat/api/guides/v1/spaces/find-direct-message). With [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), returns the direct message space between the specified user and the authenticated user. With [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app), returns the direct message space between the specified user and the calling Chat app. Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) or [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app).
+  
Returns the existing direct message with the specified user. If no direct message space is found, returns a `404 NOT_FOUND` error. For an example, see [Find a direct message](/chat/api/guides/v1/spaces/find-direct-message). With [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app), returns the direct message space between the specified user and the calling Chat app. With [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), returns the direct message space between the specified user and the authenticated user. Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)
 
 Args:
   name: string, Required. Resource name of the user to find direct message with. Format: `users/{user}`, where `{user}` is either the `id` for the [person](https://developers.google.com/people/api/rest/v1/people) from the People API, or the `id` for the [user](https://developers.google.com/admin-sdk/directory/reference/rest/v1/users) in the Directory API. For example, if the People API profile ID is `123456789`, you can find a direct message with that person by using `users/123456789` as the `name`. When [authenticated as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), you can use the email as an alias for `{user}`. For example, `users/example@gmail.com` where `example@gmail.com` is the email of the Google Chat user.
@@ -295,19 +400,54 @@ 

Method Details

{ # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. 
Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. 
"externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. 
For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. 
+ "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -324,7 +464,7 @@

Method Details

get(name, useAdminAccess=None, x__xgafv=None) -
Returns details about a space. For an example, see [Get details about a space](https://developers.google.com/workspace/chat/get-spaces). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).
+  
Returns details about a space. For an example, see [Get details about a space](https://developers.google.com/workspace/chat/get-spaces). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)
 
 Args:
   name: string, Required. Resource name of the space, in the form `spaces/{space}`. Format: `spaces/{space}` (required)
@@ -340,19 +480,54 @@ 

Method Details

{ # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. 
Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. 
"externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. 
For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. 
+ "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -369,7 +544,7 @@

Method Details

list(filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists spaces the caller is a member of. Group chats and DMs aren't listed until the first message is sent. For an example, see [List spaces](https://developers.google.com/workspace/chat/list-spaces). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). Lists spaces visible to the caller or authenticated user. Group chats and DMs aren't listed until the first message is sent. To list all named spaces by Google Workspace organization, use the [`spaces.search()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/search) method using Workspace administrator privileges instead.
+  
Lists spaces the caller is a member of. Group chats and DMs aren't listed until the first message is sent. For an example, see [List spaces](https://developers.google.com/workspace/chat/list-spaces). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) Lists spaces visible to the caller or authenticated user. Group chats and DMs aren't listed until the first message is sent. To list all named spaces by Google Workspace organization, use the [`spaces.search()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/search) method using Workspace administrator privileges instead.
 
 Args:
   filter: string, Optional. A query filter. You can filter spaces by the space type ([`space_type`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces#spacetype)). To filter by space type, you must specify valid enum value, such as `SPACE` or `GROUP_CHAT` (the `space_type` can't be `SPACE_TYPE_UNSPECIFIED`). To query for multiple space types, use the `OR` operator. For example, the following queries are valid: ``` space_type = "SPACE" spaceType = "GROUP_CHAT" OR spaceType = "DIRECT_MESSAGE" ``` Invalid queries are rejected by the server with an `INVALID_ARGUMENT` error.
@@ -389,19 +564,54 @@ 

Method Details

{ # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. 
Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. 
"externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. 
For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. 
+ "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -434,7 +644,7 @@

Method Details

patch(name, body=None, updateMask=None, useAdminAccess=None, x__xgafv=None) -
Updates a space. For an example, see [Update a space](https://developers.google.com/workspace/chat/update-spaces). If you're updating the `displayName` field and receive the error message `ALREADY_EXISTS`, try a different display name. An existing space within the Google Workspace organization might already use this display name. Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).
+  
Updates a space. For an example, see [Update a space](https://developers.google.com/workspace/chat/update-spaces). If you're updating the `displayName` field and receive the error message `ALREADY_EXISTS`, try a different display name. An existing space within the Google Workspace organization might already use this display name. Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)
 
 Args:
   name: string, Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. (required)
@@ -444,19 +654,54 @@ 

Method Details

{ # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. 
Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. 
"externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. 
For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. 
+ "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -470,7 +715,7 @@

Method Details

"type": "A String", # Output only. Deprecated: Use `space_type` instead. The type of a space. } - updateMask: string, Required. The updated field paths, comma separated if there are multiple. You can update the following fields for a space: - `space_details` - `display_name`: Only supports updating the display name for spaces where `spaceType` field is `SPACE`. If you receive the error message `ALREADY_EXISTS`, try a different value. An existing space within the Google Workspace organization might already use this display name. - `space_type`: Only supports changing a `GROUP_CHAT` space type to `SPACE`. Include `display_name` together with `space_type` in the update mask and ensure that the specified space has a non-empty display name and the `SPACE` space type. Including the `space_type` mask and the `SPACE` type in the specified space when updating the display name is optional if the existing space already has the `SPACE` type. Trying to update the space type in other ways results in an invalid argument error. `space_type` is not supported with admin access. - `space_history_state`: Updates [space history settings](https://support.google.com/chat/answer/7664687) by turning history on or off for the space. Only supported if history settings are enabled for the Google Workspace organization. To update the space history state, you must omit all other field masks in your request. `space_history_state` is not supported with admin access. - `access_settings.audience`: Updates the [access setting](https://support.google.com/chat/answer/11971020) of who can discover the space, join the space, and preview the messages in named space where `spaceType` field is `SPACE`. If the existing space has a target audience, you can remove the audience and restrict space access by omitting a value for this field mask. To update access settings for a space, the authenticating user must be a space manager and omit all other field masks in your request. 
You can't update this field if the space is in [import mode](https://developers.google.com/workspace/chat/import-data-overview). To learn more, see [Make a space discoverable to specific users](https://developers.google.com/workspace/chat/space-target-audience). `access_settings.audience` is not supported with admin access. - Developer Preview: Supports changing the [permission settings](https://support.google.com/chat/answer/13340792) of a space, supported field paths include: `permission_settings.manage_members_and_groups`, `permission_settings.modify_space_details`, `permission_settings.toggle_history`, `permission_settings.use_at_mention_all`, `permission_settings.manage_apps`, `permission_settings.manage_webhooks`, `permission_settings.reply_messages` (Warning: mutually exclusive with all other non-permission settings field paths). `permission_settings` is not supported with admin access. + updateMask: string, - Supports changing the [permission settings](https://support.google.com/chat/answer/13340792) of a space, supported field paths include: `permission_settings.manage_members_and_groups`, `permission_settings.modify_space_details`, `permission_settings.toggle_history`, `permission_settings.use_at_mention_all`, `permission_settings.manage_apps`, `permission_settings.manage_webhooks`, `permission_settings.reply_messages` (Warning: mutually exclusive with all other non-permission settings field paths). `permission_settings` is not supported with admin access. useAdminAccess: boolean, When `true`, the method runs using the user's Google Workspace administrator privileges. The calling user must be a Google Workspace administrator with the [manage chat and spaces conversations privilege](https://support.google.com/a/answer/13369245). Requires the `chat.admin.spaces` [OAuth 2.0 scope](https://developers.google.com/workspace/chat/authenticate-authorize#chat-api-scopes). Some `FieldMask` values are not supported using admin access. 
For details, see the description of `update_mask`. x__xgafv: string, V1 error format. Allowed values @@ -483,19 +728,54 @@

Method Details

{ # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. 
Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. 
"externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. 
For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. 
+ "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -534,19 +814,54 @@

Method Details

{ # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. 
Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. 
"externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. 
For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. 
+ "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -591,7 +906,7 @@

Method Details

{ # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -610,19 +925,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # Required. The `Space.spaceType` field is required. To create a space, set `Space.spaceType` to `SPACE` and set `Space.displayName`. If you receive the error message `ALREADY_EXISTS` when setting up a space, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. To create a group chat, set `Space.spaceType` to `GROUP_CHAT`. Don't set `Space.displayName`. To create a 1:1 conversation between humans, set `Space.spaceType` to `DIRECT_MESSAGE` and set `Space.singleUserBotDm` to `false`. Don't set `Space.displayName` or `Space.spaceDetails`. To create an 1:1 conversation between a human and the calling Chat app, set `Space.spaceType` to `DIRECT_MESSAGE` and `Space.singleUserBotDm` to `true`. Don't set `Space.displayName` or `Space.spaceDetails`. If a `DIRECT_MESSAGE` space already exists, that space is returned instead of creating a new space. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. 
"createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). 
"lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. 
+ "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. 
Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -648,19 +998,54 @@

Method Details

{ # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. 
Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. 
"externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. 
For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. 
+ "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. diff --git a/docs/dyn/chat_v1.spaces.members.html b/docs/dyn/chat_v1.spaces.members.html index b83e988ade3..91211f4b164 100644 --- a/docs/dyn/chat_v1.spaces.members.html +++ b/docs/dyn/chat_v1.spaces.members.html @@ -79,22 +79,22 @@

Instance Methods

Close httplib2 connections.

create(parent, body=None, useAdminAccess=None, x__xgafv=None)

-

Creates a membership for the calling Chat app, a user, or a Google Group. Creating memberships for other Chat apps isn't supported. When creating a membership, if the specified member has their auto-accept policy turned off, then they're invited, and must accept the space invitation before joining. Otherwise, creating a membership adds the member directly to the specified space. Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). For example usage, see: - [Invite or add a user to a space](https://developers.google.com/workspace/chat/create-members#create-user-membership). - [Invite or add a Google Group to a space](https://developers.google.com/workspace/chat/create-members#create-group-membership). - [Add the Chat app to a space](https://developers.google.com/workspace/chat/create-members#create-membership-calling-api).

+

Creates a membership for the calling Chat app, a user, or a Google Group. Creating memberships for other Chat apps isn't supported. When creating a membership, if the specified member has their auto-accept policy turned off, then they're invited, and must accept the space invitation before joining. Otherwise, creating a membership adds the member directly to the specified space. Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) For example usage, see: - [Invite or add a user to a space](https://developers.google.com/workspace/chat/create-members#create-user-membership). - [Invite or add a Google Group to a space](https://developers.google.com/workspace/chat/create-members#create-group-membership). - [Add the Chat app to a space](https://developers.google.com/workspace/chat/create-members#create-membership-calling-api).

delete(name, useAdminAccess=None, x__xgafv=None)

-

Deletes a membership. For an example, see [Remove a user or a Google Chat app from a space](https://developers.google.com/workspace/chat/delete-members). Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).

+

Deletes a membership. For an example, see [Remove a user or a Google Chat app from a space](https://developers.google.com/workspace/chat/delete-members). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)

get(name, useAdminAccess=None, x__xgafv=None)

-

Returns details about a membership. For an example, see [Get details about a user's or Google Chat app's membership](https://developers.google.com/workspace/chat/get-members). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).

+

Returns details about a membership. For an example, see [Get details about a user's or Google Chat app's membership](https://developers.google.com/workspace/chat/get-members). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)

list(parent, filter=None, pageSize=None, pageToken=None, showGroups=None, showInvited=None, useAdminAccess=None, x__xgafv=None)

-

Lists memberships in a space. For an example, see [List users and Google Chat apps in a space](https://developers.google.com/workspace/chat/list-members). Listing memberships with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) lists memberships in spaces that the Chat app has access to, but excludes Chat app memberships, including its own. Listing memberships with [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) lists memberships in spaces that the authenticated user has access to. Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).

+

Lists memberships in a space. For an example, see [List users and Google Chat apps in a space](https://developers.google.com/workspace/chat/list-members). Listing memberships with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) lists memberships in spaces that the Chat app has access to, but excludes Chat app memberships, including its own. Listing memberships with [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) lists memberships in spaces that the authenticated user has access to. Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)

list_next()

Retrieves the next page of results.

patch(name, body=None, updateMask=None, useAdminAccess=None, x__xgafv=None)

-

Updates a membership. For an example, see [Update a user's membership in a space](https://developers.google.com/workspace/chat/update-members). Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).

+

Updates a membership. For an example, see [Update a user's membership in a space](https://developers.google.com/workspace/chat/update-members). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)

Method Details

close() @@ -103,7 +103,7 @@

Method Details

create(parent, body=None, useAdminAccess=None, x__xgafv=None) -
Creates a membership for the calling Chat app, a user, or a Google Group. Creating memberships for other Chat apps isn't supported. When creating a membership, if the specified member has their auto-accept policy turned off, then they're invited, and must accept the space invitation before joining. Otherwise, creating a membership adds the member directly to the specified space. Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). For example usage, see: - [Invite or add a user to a space](https://developers.google.com/workspace/chat/create-members#create-user-membership). - [Invite or add a Google Group to a space](https://developers.google.com/workspace/chat/create-members#create-group-membership). - [Add the Chat app to a space](https://developers.google.com/workspace/chat/create-members#create-membership-calling-api).
+  
Creates a membership for the calling Chat app, a user, or a Google Group. Creating memberships for other Chat apps isn't supported. When creating a membership, if the specified member has their auto-accept policy turned off, then they're invited, and must accept the space invitation before joining. Otherwise, creating a membership adds the member directly to the specified space. Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) For example usage, see: - [Invite or add a user to a space](https://developers.google.com/workspace/chat/create-members#create-user-membership). - [Invite or add a Google Group to a space](https://developers.google.com/workspace/chat/create-members#create-group-membership). - [Add the Chat app to a space](https://developers.google.com/workspace/chat/create-members#create-membership-calling-api).
 
 Args:
   parent: string, Required. The resource name of the space for which to create the membership. Format: spaces/{space} (required)
@@ -113,7 +113,7 @@ 

Method Details

{ # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -140,7 +140,7 @@

Method Details

{ # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -158,7 +158,7 @@

Method Details

delete(name, useAdminAccess=None, x__xgafv=None) -
Deletes a membership. For an example, see [Remove a user or a Google Chat app from a space](https://developers.google.com/workspace/chat/delete-members). Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).
+  
Deletes a membership. For an example, see [Remove a user or a Google Chat app from a space](https://developers.google.com/workspace/chat/delete-members). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)
 
 Args:
   name: string, Required. Resource name of the membership to delete. Chat apps can delete human users' or their own memberships. Chat apps can't delete other apps' memberships. When deleting a human membership, requires the `chat.memberships` scope and `spaces/{space}/members/{member}` format. You can use the email as an alias for `{member}`. For example, `spaces/{space}/members/example@gmail.com` where `example@gmail.com` is the email of the Google Chat user. When deleting an app membership, requires the `chat.memberships.app` scope and `spaces/{space}/members/app` format. Format: `spaces/{space}/members/{member}` or `spaces/{space}/members/app`. (required)
@@ -174,7 +174,7 @@ 

Method Details

{ # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -192,10 +192,10 @@

Method Details

get(name, useAdminAccess=None, x__xgafv=None) -
Returns details about a membership. For an example, see [Get details about a user's or Google Chat app's membership](https://developers.google.com/workspace/chat/get-members). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).
+  
Returns details about a membership. For an example, see [Get details about a user's or Google Chat app's membership](https://developers.google.com/workspace/chat/get-members). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)
 
 Args:
-  name: string, Required. Resource name of the membership to retrieve. To get the app's own membership [by using user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), you can optionally use `spaces/{space}/members/app`. Format: `spaces/{space}/members/{member}` or `spaces/{space}/members/app` When [authenticated as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), you can use the user's email as an alias for `{member}`. For example, `spaces/{space}/members/example@gmail.com` where `example@gmail.com` is the email of the Google Chat user. (required)
+  name: string, Required. Resource name of the membership to retrieve. To get the app's own membership [by using user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), you can optionally use `spaces/{space}/members/app`. Format: `spaces/{space}/members/{member}` or `spaces/{space}/members/app` You can use the user's email as an alias for `{member}`. For example, `spaces/{space}/members/example@gmail.com` where `example@gmail.com` is the email of the Google Chat user. (required)
   useAdminAccess: boolean, When `true`, the method runs using the user's Google Workspace administrator privileges. The calling user must be a Google Workspace administrator with the [manage chat and spaces conversations privilege](https://support.google.com/a/answer/13369245). Requires the `chat.admin.memberships` or `chat.admin.memberships.readonly` [OAuth 2.0 scopes](https://developers.google.com/workspace/chat/authenticate-authorize#chat-api-scopes). Getting app memberships in a space isn't supported when using admin access.
   x__xgafv: string, V1 error format.
     Allowed values
@@ -208,7 +208,7 @@ 

Method Details

{ # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -226,7 +226,7 @@

Method Details

list(parent, filter=None, pageSize=None, pageToken=None, showGroups=None, showInvited=None, useAdminAccess=None, x__xgafv=None) -
Lists memberships in a space. For an example, see [List users and Google Chat apps in a space](https://developers.google.com/workspace/chat/list-members). Listing memberships with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) lists memberships in spaces that the Chat app has access to, but excludes Chat app memberships, including its own. Listing memberships with [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) lists memberships in spaces that the authenticated user has access to. Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).
+  
Lists memberships in a space. For an example, see [List users and Google Chat apps in a space](https://developers.google.com/workspace/chat/list-members). Listing memberships with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) lists memberships in spaces that the Chat app has access to, but excludes Chat app memberships, including its own. Listing memberships with [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) lists memberships in spaces that the authenticated user has access to. Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)
 
 Args:
   parent: string, Required. The resource name of the space for which to fetch a membership list. Format: spaces/{space} (required)
@@ -249,7 +249,7 @@ 

Method Details

{ # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -284,7 +284,7 @@

Method Details

patch(name, body=None, updateMask=None, useAdminAccess=None, x__xgafv=None) -
Updates a membership. For an example, see [Update a user's membership in a space](https://developers.google.com/workspace/chat/update-members). Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).
+  
Updates a membership. For an example, see [Update a user's membership in a space](https://developers.google.com/workspace/chat/update-members). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)
 
 Args:
   name: string, Resource name of the membership, assigned by the server. Format: `spaces/{space}/members/{member}` (required)
@@ -294,7 +294,7 @@ 

Method Details

{ # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -322,7 +322,7 @@

Method Details

{ # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. 
diff --git a/docs/dyn/chat_v1.spaces.messages.html b/docs/dyn/chat_v1.spaces.messages.html index 431345a7c4a..b4226d34739 100644 --- a/docs/dyn/chat_v1.spaces.messages.html +++ b/docs/dyn/chat_v1.spaces.messages.html @@ -92,10 +92,10 @@

Instance Methods

Creates a message in a Google Chat space. For an example, see [Send a message](https://developers.google.com/workspace/chat/create-messages). The `create()` method requires either user or app authentication. Chat attributes the message sender differently depending on the type of authentication that you use in your request. The following image shows how Chat attributes a message when you use app authentication. Chat displays the Chat app as the message sender. The content of the message can contain text (`text`), cards (`cardsV2`), and accessory widgets (`accessoryWidgets`). ![Message sent with app authentication](https://developers.google.com/workspace/chat/images/message-app-auth.svg) The following image shows how Chat attributes a message when you use user authentication. Chat displays the user as the message sender and attributes the Chat app to the message by displaying its name. The content of message can only contain text (`text`). ![Message sent with user authentication](https://developers.google.com/workspace/chat/images/message-user-auth.svg) The maximum message size, including the message contents, is 32,000 bytes.

delete(name, force=None, x__xgafv=None)

-

Deletes a message. For an example, see [Delete a message](https://developers.google.com/workspace/chat/delete-messages). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). When using app authentication, requests can only delete messages created by the calling Chat app.

+

Deletes a message. For an example, see [Delete a message](https://developers.google.com/workspace/chat/delete-messages). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) When using app authentication, requests can only delete messages created by the calling Chat app.

get(name, x__xgafv=None)

-

Returns details about a message. For an example, see [Get details about a message](https://developers.google.com/workspace/chat/get-messages). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). Note: Might return a message from a blocked member or space.

+

Returns details about a message. For an example, see [Get details about a message](https://developers.google.com/workspace/chat/get-messages). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) Note: Might return a message from a blocked member or space.

list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, showDeleted=None, x__xgafv=None)

Lists messages in a space that the caller is a member of, including messages from blocked members and spaces. If you list messages from a space with no messages, the response is an empty object. When using a REST/HTTP interface, the response contains an empty JSON object, `{}`. For an example, see [List messages](https://developers.google.com/workspace/chat/api/guides/v1/messages/list). Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).

@@ -104,10 +104,10 @@

Instance Methods

Retrieves the next page of results.

patch(name, allowMissing=None, body=None, updateMask=None, x__xgafv=None)

-

Updates a message. There's a difference between the `patch` and `update` methods. The `patch` method uses a `patch` request while the `update` method uses a `put` request. We recommend using the `patch` method. For an example, see [Update a message](https://developers.google.com/workspace/chat/update-messages). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). When using app authentication, requests can only update messages created by the calling Chat app.

+

Updates a message. There's a difference between the `patch` and `update` methods. The `patch` method uses a `patch` request while the `update` method uses a `put` request. We recommend using the `patch` method. For an example, see [Update a message](https://developers.google.com/workspace/chat/update-messages). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) When using app authentication, requests can only update messages created by the calling Chat app.

update(name, allowMissing=None, body=None, updateMask=None, x__xgafv=None)

-

Updates a message. There's a difference between the `patch` and `update` methods. The `patch` method uses a `patch` request while the `update` method uses a `put` request. We recommend using the `patch` method. For an example, see [Update a message](https://developers.google.com/workspace/chat/update-messages). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). When using app authentication, requests can only update messages created by the calling Chat app.

+

Updates a message. There's a difference between the `patch` and `update` methods. The `patch` method uses a `patch` request while the `update` method uses a `put` request. We recommend using the `patch` method. For an example, see [Update a message](https://developers.google.com/workspace/chat/update-messages). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) When using app authentication, requests can only update messages created by the calling Chat app.

Method Details

close() @@ -1858,19 +1858,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -3642,19 +3677,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode` requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -3678,7 +3748,7 @@

Method Details

delete(name, force=None, x__xgafv=None) -
Deletes a message. For an example, see [Delete a message](https://developers.google.com/workspace/chat/delete-messages). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). When using app authentication, requests can only delete messages created by the calling Chat app.
+  
Deletes a message. For an example, see [Delete a message](https://developers.google.com/workspace/chat/delete-messages). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) When using app authentication, requests can only delete messages created by the calling Chat app.
 
 Args:
   name: string, Required. Resource name of the message. Format: `spaces/{space}/messages/{message}` If you've set a custom ID for your message, you can use the value from the `clientAssignedMessageId` field for `{message}`. For details, see [Name a message] (https://developers.google.com/workspace/chat/create-messages#name_a_created_message). (required)
@@ -3697,7 +3767,7 @@ 

Method Details

get(name, x__xgafv=None) -
Returns details about a message. For an example, see [Get details about a message](https://developers.google.com/workspace/chat/get-messages). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). Note: Might return a message from a blocked member or space.
+  
Returns details about a message. For an example, see [Get details about a message](https://developers.google.com/workspace/chat/get-messages). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) Note: Might return a message from a blocked member or space.
 
 Args:
   name: string, Required. Resource name of the message. Format: `spaces/{space}/messages/{message}` If you've set a custom ID for your message, you can use the value from the `clientAssignedMessageId` field for `{message}`. For details, see [Name a message] (https://developers.google.com/workspace/chat/create-messages#name_a_created_message). (required)
@@ -5444,19 +5514,54 @@ 

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode` requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -7234,19 +7339,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode` requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -7287,7 +7427,7 @@

Method Details

patch(name, allowMissing=None, body=None, updateMask=None, x__xgafv=None) -
Updates a message. There's a difference between the `patch` and `update` methods. The `patch` method uses a `patch` request while the `update` method uses a `put` request. We recommend using the `patch` method. For an example, see [Update a message](https://developers.google.com/workspace/chat/update-messages). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). When using app authentication, requests can only update messages created by the calling Chat app.
+  
Updates a message. There's a difference between the `patch` and `update` methods. The `patch` method uses a `patch` request while the `update` method uses a `put` request. We recommend using the `patch` method. For an example, see [Update a message](https://developers.google.com/workspace/chat/update-messages). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) When using app authentication, requests can only update messages created by the calling Chat app.
 
 Args:
   name: string, Resource name of the message. Format: `spaces/{space}/messages/{message}` Where `{space}` is the ID of the space where the message is posted and `{message}` is a system-assigned ID for the message. For example, `spaces/AAAAAAAAAAA/messages/BBBBBBBBBBB.BBBBBBBBBBB`. If you set a custom ID when you create a message, you can use this ID to specify the message in a request by replacing `{message}` with the value from the `clientAssignedMessageId` field. For example, `spaces/AAAAAAAAAAA/messages/client-custom-name`. For details, see [Name a message](https://developers.google.com/workspace/chat/create-messages#name_a_created_message). (required)
@@ -9029,19 +9169,54 @@ 

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -10807,19 +10982,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -10843,7 +11053,7 @@

Method Details

update(name, allowMissing=None, body=None, updateMask=None, x__xgafv=None) -
Updates a message. There's a difference between the `patch` and `update` methods. The `patch` method uses a `patch` request while the `update` method uses a `put` request. We recommend using the `patch` method. For an example, see [Update a message](https://developers.google.com/workspace/chat/update-messages). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). When using app authentication, requests can only update messages created by the calling Chat app.
+  
Updates a message. There's a difference between the `patch` and `update` methods. The `patch` method uses a `patch` request while the `update` method uses a `put` request. We recommend using the `patch` method. For an example, see [Update a message](https://developers.google.com/workspace/chat/update-messages). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) When using app authentication, requests can only update messages created by the calling Chat app.
 
 Args:
   name: string, Resource name of the message. Format: `spaces/{space}/messages/{message}` Where `{space}` is the ID of the space where the message is posted and `{message}` is a system-assigned ID for the message. For example, `spaces/AAAAAAAAAAA/messages/BBBBBBBBBBB.BBBBBBBBBBB`. If you set a custom ID when you create a message, you can use this ID to specify the message in a request by replacing `{message}` with the value from the `clientAssignedMessageId` field. For example, `spaces/AAAAAAAAAAA/messages/client-custom-name`. For details, see [Name a message](https://developers.google.com/workspace/chat/create-messages#name_a_created_message). (required)
@@ -12585,19 +12795,54 @@ 

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -14363,19 +14608,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. 
diff --git a/docs/dyn/chat_v1.spaces.spaceEvents.html b/docs/dyn/chat_v1.spaces.spaceEvents.html index 94f6da02587..b3e4d1b8ef5 100644 --- a/docs/dyn/chat_v1.spaces.spaceEvents.html +++ b/docs/dyn/chat_v1.spaces.spaceEvents.html @@ -115,7 +115,7 @@

Method Details

"membership": { # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. # The new membership. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -138,7 +138,7 @@

Method Details

"membership": { # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. # The deleted membership. Only the `name` and `state` fields are populated. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -161,7 +161,7 @@

Method Details

"membership": { # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. # The updated membership. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -182,7 +182,7 @@

Method Details

"membership": { # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. # The new membership. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -201,7 +201,7 @@

Method Details

"membership": { # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. # The deleted membership. Only the `name` and `state` fields are populated. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -220,7 +220,7 @@

Method Details

"membership": { # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. # The updated membership. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -1973,19 +1973,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -3746,19 +3781,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -5519,19 +5589,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -7290,19 +7395,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -9059,19 +9199,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -10828,19 +11003,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -10948,19 +11158,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # The updated space. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. 
Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. 
"externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. 
For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. 
+ "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -10980,19 +11225,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # The updated space. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. 
Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. 
"externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. 
For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. 
+ "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -11038,7 +11318,7 @@

Method Details

"membership": { # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. # The new membership. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -11061,7 +11341,7 @@

Method Details

"membership": { # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. # The deleted membership. Only the `name` and `state` fields are populated. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -11084,7 +11364,7 @@

Method Details

"membership": { # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. # The updated membership. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -11105,7 +11385,7 @@

Method Details

"membership": { # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. # The new membership. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -11124,7 +11404,7 @@

Method Details

"membership": { # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. # The deleted membership. Only the `name` and `state` fields are populated. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -11143,7 +11423,7 @@

Method Details

"membership": { # Represents a membership relation in Google Chat, such as whether a user or Chat app is invited to, part of, or absent from a space. # The updated membership. "createTime": "A String", # Optional. Immutable. The creation time of the membership, such as when a member joined or was invited to join a space. This field is output only, except when used to import historical memberships in import mode spaces. "deleteTime": "A String", # Optional. Immutable. The deletion time of the membership, such as when a member left or was removed from a space. This field is output only, except when used to import historical memberships in import mode spaces. - "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. + "groupMember": { # A Google Group in Google Chat. # The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "name": "A String", # Resource name for a Google Group. Represents a [group](https://cloud.google.com/identity/docs/reference/rest/v1/groups) in Cloud Identity Groups API. Format: groups/{group} }, "member": { # A user in Google Chat. When returned as an output from a request, if your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output for a `User` resource only populates the user's `name` and `type`. # The Google Chat user or app the membership corresponds to. If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [user](https://developers.google.com/workspace/chat/api/reference/rest/v1/User) `name` and `type`. @@ -12896,19 +13176,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -14669,19 +14984,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -16442,19 +16792,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -18213,19 +18598,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -19982,19 +20402,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -21751,19 +22206,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # If your Chat app [authenticates as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), the output populates the [space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces) `name`. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). 
Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. 
An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. "externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. 
You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. 
Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -21871,19 +22361,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # The updated space. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. 
Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. 
"externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. 
For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. 
+ "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. @@ -21903,19 +22428,54 @@

Method Details

"space": { # A space in Google Chat. Spaces are conversations between two or more users or 1:1 messages between a user and a Chat app. # The updated space. "accessSettings": { # Represents the [access setting](https://support.google.com/chat/answer/11971020) of the space. # Optional. Specifies the [access setting](https://support.google.com/chat/answer/11971020) of the space. Only populated when the `space_type` is `SPACE`. "accessState": "A String", # Output only. Indicates the access state of the space. - "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). + "audience": "A String", # Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. 
Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). }, "adminInstalled": True or False, # Output only. For direct message (DM) spaces with a Chat app, whether the space was created by a Google Workspace administrator. Administrators can install and set up a direct message with a Chat app on behalf of users in their organization. To support admin install, your Chat app must feature direct messaging. "createTime": "A String", # Optional. Immutable. For spaces created in Chat, the time the space was created. This field is output only, except when used in import mode spaces. For import mode spaces, set this field to the historical timestamp at which the space was created in the source in order to preserve the original creation time. Only populated in the output when `spaceType` is `GROUP_CHAT` or `SPACE`. "displayName": "A String", # The space's display name. Required when [creating a space](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/create) with a `spaceType` of `SPACE`. If you receive the error message `ALREADY_EXISTS` when creating a space or updating the `displayName`, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. For direct messages, this field might be empty. Supports up to 128 characters. 
"externalUserAllowed": True or False, # Immutable. Whether this space permits any Google Chat user as a member. Input when creating a space in a Google Workspace organization. Omit this field when creating spaces in the following conditions: * The authenticated user uses a consumer account (unmanaged user account). By default, a space created by a consumer account permits any Google Chat user. For existing spaces, this field is output only. - "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. + "importMode": True or False, # Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). "lastActiveTime": "A String", # Output only. Timestamp of the last message in the space. "membershipCount": { # Represents the count of memberships of a space, grouped into categories. # Output only. The count of joined memberships grouped by member type. Populated when the `space_type` is `SPACE`, `DIRECT_MESSAGE` or `GROUP_CHAT`. "joinedDirectHumanUserCount": 42, # Count of human users that have directly joined the space, not counting users joined by having membership in a joined group. "joinedGroupCount": 42, # Count of all groups that have directly joined the space. }, "name": "A String", # Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. 
For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`. + "permissionSettings": { # [Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request. # Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead. + "manageApps": { # Represents a space permission setting. # Setting for managing apps in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageMembersAndGroups": { # Represents a space permission setting. # Setting for managing members and groups in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "manageWebhooks": { # Represents a space permission setting. # Setting for managing webhooks in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "modifySpaceDetails": { # Represents a space permission setting. # Setting for updating space name, avatar, description and guidelines. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "postMessages": { # Represents a space permission setting. # Output only. Setting for posting messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. 
+ "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "replyMessages": { # Represents a space permission setting. # Setting for replying to messages in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "toggleHistory": { # Represents a space permission setting. # Setting for toggling space history on and off. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + "useAtMentionAll": { # Represents a space permission setting. # Setting for using @all in a space. + "managersAllowed": True or False, # Whether spaces managers have this permission. + "membersAllowed": True or False, # Whether non-manager members have this permission. + }, + }, + "predefinedPermissionSettings": "A String", # Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field. "singleUserBotDm": True or False, # Optional. Whether the space is a DM between a Chat app and a single human. "spaceDetails": { # Details about the space including description and rules. # Details about the space including description and rules. "description": "A String", # Optional. A description of the space. For example, describe the space's discussion topic, functional purpose, or participants. Supports up to 150 characters. 
diff --git a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.automationRuns.html b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.automationRuns.html index fd4e6015784..a9a4f7b1448 100644 --- a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.automationRuns.html +++ b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.automationRuns.html @@ -199,12 +199,28 @@

Method Details

"jobs": [ # Optional. Jobs to repair. Proceeds only after job name matched any one in the list, or for all jobs if unspecified or empty. The phase that includes the job must match the phase ID specified in `source_phase`. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. "A String", ], + "phases": [ # Optional. Phases within which jobs are subject to automatic repair actions on failure. Proceeds only after phase name matched any one in the list, or for all phases if unspecified. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. + "A String", + ], + "repairPhases": [ # Required. Defines the types of automatic repair phases for failed jobs. + { # Configuration of the repair phase. + "retry": { # Retries the failed job. # Optional. Retries a failed job. + "attempts": "A String", # Required. Total number of retries. Retry is skipped if set to 0; The minimum value is 1, and the maximum value is 10. + "backoffMode": "A String", # Optional. The pattern of how wait time will be increased. Default is linear. Backoff mode will be ignored if `wait` is 0. + "wait": "A String", # Optional. How long to wait for the first retry. Default is 0, and the maximum value is 14d. + }, + "rollback": { # Rolls back a `Rollout`. # Optional. Rolls back a `Rollout`. + "destinationPhase": "A String", # Optional. The starting phase ID for the `Rollout`. If unspecified, the `Rollout` will start in the stable phase. + "disableRollbackIfRolloutPending": True or False, # Optional. If pending rollout exists on the target, the rollback operation will be aborted. 
+ }, + }, + ], }, }, ], "selector": { # AutomationResourceSelector contains the information to select the resources to which an Automation is going to be applied. # Required. Selected resources to which the automation will be applied. "targets": [ # Contains attributes about a target. - { # Contains criteria for selecting Targets. + { # Contains criteria for selecting Targets. This could be used to select targets for a Deploy Policy or for an Automation. "id": "A String", # ID of the `Target`. The value of this field could be one of the following: * The last segment of a target name * "*", all targets in a location "labels": { # Target labels. "a_key": "A String", @@ -221,7 +237,7 @@

Method Details

"etag": "A String", # Output only. The weak etag of the `AutomationRun` resource. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "expireTime": "A String", # Output only. Time the `AutomationRun` expires. An `AutomationRun` expires after 14 days from its creation date. "name": "A String", # Output only. Name of the `AutomationRun`. Format is `projects/{project}/locations/{location}/deliveryPipelines/{delivery_pipeline}/automationRuns/{automation_run}`. - "policyViolation": { # Returned from an action if one or more policies were violated, and therefore the action was prevented. Contains information about what policies were violated and why. # Output only. Contains information about what policies prevented the `AutomationRun` to proceed. + "policyViolation": { # Returned from an action if one or more policies were violated, and therefore the action was prevented. Contains information about what policies were violated and why. # Output only. Contains information about what policies prevented the `AutomationRun` from proceeding. "policyViolationDetails": [ # Policy violation details. { # Policy violation details. "failureMessage": "A String", # User readable message about why the request violated a policy. This is not intended for machine parsing. @@ -237,6 +253,7 @@

Method Details

"wait": "A String", # Output only. How long the operation will be paused. }, "repairRolloutOperation": { # Contains the information for an automated `repair rollout` operation. # Output only. Repairs a failed 'Rollout'. + "currentRepairPhaseIndex": "A String", # Output only. The index of the current repair action in the repair sequence. "jobId": "A String", # Output only. The job ID for the Job to repair. "phaseId": "A String", # Output only. The phase ID of the phase that includes the job being repaired. "repairPhases": [ # Output only. Records of the repair attempts. Each repair phase may have multiple retry attempts or single rollback attempt. @@ -255,6 +272,7 @@

Method Details

}, "rollback": { # RollbackAttempt represents an action of rolling back a Cloud Deploy 'Target'. # Output only. Rollback attempt for rollback repair mode . "destinationPhase": "A String", # Output only. The phase to which the rollout will be rolled back to. + "disableRollbackIfRolloutPending": True or False, # Output only. If active rollout exists on the target, abort this rollback. "rolloutId": "A String", # Output only. ID of the rollback `Rollout` to create. "state": "A String", # Output only. Valid state of this rollback action. "stateDesc": "A String", # Output only. Description of the state of the Rollback. @@ -359,12 +377,28 @@

Method Details

"jobs": [ # Optional. Jobs to repair. Proceeds only after job name matched any one in the list, or for all jobs if unspecified or empty. The phase that includes the job must match the phase ID specified in `source_phase`. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. "A String", ], + "phases": [ # Optional. Phases within which jobs are subject to automatic repair actions on failure. Proceeds only after phase name matched any one in the list, or for all phases if unspecified. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. + "A String", + ], + "repairPhases": [ # Required. Defines the types of automatic repair phases for failed jobs. + { # Configuration of the repair phase. + "retry": { # Retries the failed job. # Optional. Retries a failed job. + "attempts": "A String", # Required. Total number of retries. Retry is skipped if set to 0; The minimum value is 1, and the maximum value is 10. + "backoffMode": "A String", # Optional. The pattern of how wait time will be increased. Default is linear. Backoff mode will be ignored if `wait` is 0. + "wait": "A String", # Optional. How long to wait for the first retry. Default is 0, and the maximum value is 14d. + }, + "rollback": { # Rolls back a `Rollout`. # Optional. Rolls back a `Rollout`. + "destinationPhase": "A String", # Optional. The starting phase ID for the `Rollout`. If unspecified, the `Rollout` will start in the stable phase. + "disableRollbackIfRolloutPending": True or False, # Optional. If pending rollout exists on the target, the rollback operation will be aborted. 
+ }, + }, + ], }, }, ], "selector": { # AutomationResourceSelector contains the information to select the resources to which an Automation is going to be applied. # Required. Selected resources to which the automation will be applied. "targets": [ # Contains attributes about a target. - { # Contains criteria for selecting Targets. + { # Contains criteria for selecting Targets. This could be used to select targets for a Deploy Policy or for an Automation. "id": "A String", # ID of the `Target`. The value of this field could be one of the following: * The last segment of a target name * "*", all targets in a location "labels": { # Target labels. "a_key": "A String", @@ -381,7 +415,7 @@

Method Details

"etag": "A String", # Output only. The weak etag of the `AutomationRun` resource. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. "expireTime": "A String", # Output only. Time the `AutomationRun` expires. An `AutomationRun` expires after 14 days from its creation date. "name": "A String", # Output only. Name of the `AutomationRun`. Format is `projects/{project}/locations/{location}/deliveryPipelines/{delivery_pipeline}/automationRuns/{automation_run}`. - "policyViolation": { # Returned from an action if one or more policies were violated, and therefore the action was prevented. Contains information about what policies were violated and why. # Output only. Contains information about what policies prevented the `AutomationRun` to proceed. + "policyViolation": { # Returned from an action if one or more policies were violated, and therefore the action was prevented. Contains information about what policies were violated and why. # Output only. Contains information about what policies prevented the `AutomationRun` from proceeding. "policyViolationDetails": [ # Policy violation details. { # Policy violation details. "failureMessage": "A String", # User readable message about why the request violated a policy. This is not intended for machine parsing. @@ -397,6 +431,7 @@

Method Details

"wait": "A String", # Output only. How long the operation will be paused. }, "repairRolloutOperation": { # Contains the information for an automated `repair rollout` operation. # Output only. Repairs a failed 'Rollout'. + "currentRepairPhaseIndex": "A String", # Output only. The index of the current repair action in the repair sequence. "jobId": "A String", # Output only. The job ID for the Job to repair. "phaseId": "A String", # Output only. The phase ID of the phase that includes the job being repaired. "repairPhases": [ # Output only. Records of the repair attempts. Each repair phase may have multiple retry attempts or single rollback attempt. @@ -415,6 +450,7 @@

Method Details

}, "rollback": { # RollbackAttempt represents an action of rolling back a Cloud Deploy 'Target'. # Output only. Rollback attempt for rollback repair mode . "destinationPhase": "A String", # Output only. The phase to which the rollout will be rolled back to. + "disableRollbackIfRolloutPending": True or False, # Output only. If active rollout exists on the target, abort this rollback. "rolloutId": "A String", # Output only. ID of the rollback `Rollout` to create. "state": "A String", # Output only. Valid state of this rollback action. "stateDesc": "A String", # Output only. Description of the state of the Rollback. diff --git a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.automations.html b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.automations.html index ce543ebfd7d..d96a70d0f27 100644 --- a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.automations.html +++ b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.automations.html @@ -168,12 +168,28 @@

Method Details

"jobs": [ # Optional. Jobs to repair. Proceeds only after job name matched any one in the list, or for all jobs if unspecified or empty. The phase that includes the job must match the phase ID specified in `source_phase`. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. "A String", ], + "phases": [ # Optional. Phases within which jobs are subject to automatic repair actions on failure. Proceeds only after phase name matched any one in the list, or for all phases if unspecified. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. + "A String", + ], + "repairPhases": [ # Required. Defines the types of automatic repair phases for failed jobs. + { # Configuration of the repair phase. + "retry": { # Retries the failed job. # Optional. Retries a failed job. + "attempts": "A String", # Required. Total number of retries. Retry is skipped if set to 0; The minimum value is 1, and the maximum value is 10. + "backoffMode": "A String", # Optional. The pattern of how wait time will be increased. Default is linear. Backoff mode will be ignored if `wait` is 0. + "wait": "A String", # Optional. How long to wait for the first retry. Default is 0, and the maximum value is 14d. + }, + "rollback": { # Rolls back a `Rollout`. # Optional. Rolls back a `Rollout`. + "destinationPhase": "A String", # Optional. The starting phase ID for the `Rollout`. If unspecified, the `Rollout` will start in the stable phase. + "disableRollbackIfRolloutPending": True or False, # Optional. If pending rollout exists on the target, the rollback operation will be aborted. 
+ }, + }, + ], }, }, ], "selector": { # AutomationResourceSelector contains the information to select the resources to which an Automation is going to be applied. # Required. Selected resources to which the automation will be applied. "targets": [ # Contains attributes about a target. - { # Contains criteria for selecting Targets. + { # Contains criteria for selecting Targets. This could be used to select targets for a Deploy Policy or for an Automation. "id": "A String", # ID of the `Target`. The value of this field could be one of the following: * The last segment of a target name * "*", all targets in a location "labels": { # Target labels. "a_key": "A String", @@ -330,12 +346,28 @@

Method Details

"jobs": [ # Optional. Jobs to repair. Proceeds only after job name matched any one in the list, or for all jobs if unspecified or empty. The phase that includes the job must match the phase ID specified in `source_phase`. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. "A String", ], + "phases": [ # Optional. Phases within which jobs are subject to automatic repair actions on failure. Proceeds only after phase name matched any one in the list, or for all phases if unspecified. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. + "A String", + ], + "repairPhases": [ # Required. Defines the types of automatic repair phases for failed jobs. + { # Configuration of the repair phase. + "retry": { # Retries the failed job. # Optional. Retries a failed job. + "attempts": "A String", # Required. Total number of retries. Retry is skipped if set to 0; The minimum value is 1, and the maximum value is 10. + "backoffMode": "A String", # Optional. The pattern of how wait time will be increased. Default is linear. Backoff mode will be ignored if `wait` is 0. + "wait": "A String", # Optional. How long to wait for the first retry. Default is 0, and the maximum value is 14d. + }, + "rollback": { # Rolls back a `Rollout`. # Optional. Rolls back a `Rollout`. + "destinationPhase": "A String", # Optional. The starting phase ID for the `Rollout`. If unspecified, the `Rollout` will start in the stable phase. + "disableRollbackIfRolloutPending": True or False, # Optional. If pending rollout exists on the target, the rollback operation will be aborted. 
+ }, + }, + ], }, }, ], "selector": { # AutomationResourceSelector contains the information to select the resources to which an Automation is going to be applied. # Required. Selected resources to which the automation will be applied. "targets": [ # Contains attributes about a target. - { # Contains criteria for selecting Targets. + { # Contains criteria for selecting Targets. This could be used to select targets for a Deploy Policy or for an Automation. "id": "A String", # ID of the `Target`. The value of this field could be one of the following: * The last segment of a target name * "*", all targets in a location "labels": { # Target labels. "a_key": "A String", @@ -428,12 +460,28 @@

Method Details

"jobs": [ # Optional. Jobs to repair. Proceeds only after job name matched any one in the list, or for all jobs if unspecified or empty. The phase that includes the job must match the phase ID specified in `source_phase`. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. "A String", ], + "phases": [ # Optional. Phases within which jobs are subject to automatic repair actions on failure. Proceeds only after phase name matched any one in the list, or for all phases if unspecified. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. + "A String", + ], + "repairPhases": [ # Required. Defines the types of automatic repair phases for failed jobs. + { # Configuration of the repair phase. + "retry": { # Retries the failed job. # Optional. Retries a failed job. + "attempts": "A String", # Required. Total number of retries. Retry is skipped if set to 0; The minimum value is 1, and the maximum value is 10. + "backoffMode": "A String", # Optional. The pattern of how wait time will be increased. Default is linear. Backoff mode will be ignored if `wait` is 0. + "wait": "A String", # Optional. How long to wait for the first retry. Default is 0, and the maximum value is 14d. + }, + "rollback": { # Rolls back a `Rollout`. # Optional. Rolls back a `Rollout`. + "destinationPhase": "A String", # Optional. The starting phase ID for the `Rollout`. If unspecified, the `Rollout` will start in the stable phase. + "disableRollbackIfRolloutPending": True or False, # Optional. If pending rollout exists on the target, the rollback operation will be aborted. 
+ }, + }, + ], }, }, ], "selector": { # AutomationResourceSelector contains the information to select the resources to which an Automation is going to be applied. # Required. Selected resources to which the automation will be applied. "targets": [ # Contains attributes about a target. - { # Contains criteria for selecting Targets. + { # Contains criteria for selecting Targets. This could be used to select targets for a Deploy Policy or for an Automation. "id": "A String", # ID of the `Target`. The value of this field could be one of the following: * The last segment of a target name * "*", all targets in a location "labels": { # Target labels. "a_key": "A String", @@ -535,12 +583,28 @@

Method Details

"jobs": [ # Optional. Jobs to repair. Proceeds only after job name matched any one in the list, or for all jobs if unspecified or empty. The phase that includes the job must match the phase ID specified in `source_phase`. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. "A String", ], + "phases": [ # Optional. Phases within which jobs are subject to automatic repair actions on failure. Proceeds only after phase name matched any one in the list, or for all phases if unspecified. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`. + "A String", + ], + "repairPhases": [ # Required. Defines the types of automatic repair phases for failed jobs. + { # Configuration of the repair phase. + "retry": { # Retries the failed job. # Optional. Retries a failed job. + "attempts": "A String", # Required. Total number of retries. Retry is skipped if set to 0; The minimum value is 1, and the maximum value is 10. + "backoffMode": "A String", # Optional. The pattern of how wait time will be increased. Default is linear. Backoff mode will be ignored if `wait` is 0. + "wait": "A String", # Optional. How long to wait for the first retry. Default is 0, and the maximum value is 14d. + }, + "rollback": { # Rolls back a `Rollout`. # Optional. Rolls back a `Rollout`. + "destinationPhase": "A String", # Optional. The starting phase ID for the `Rollout`. If unspecified, the `Rollout` will start in the stable phase. + "disableRollbackIfRolloutPending": True or False, # Optional. If pending rollout exists on the target, the rollback operation will be aborted. 
+ }, + }, + ], }, }, ], "selector": { # AutomationResourceSelector contains the information to select the resources to which an Automation is going to be applied. # Required. Selected resources to which the automation will be applied. "targets": [ # Contains attributes about a target. - { # Contains criteria for selecting Targets. + { # Contains criteria for selecting Targets. This could be used to select targets for a Deploy Policy or for an Automation. "id": "A String", # ID of the `Target`. The value of this field could be one of the following: * The last segment of a target name * "*", all targets in a location "labels": { # Target labels. "a_key": "A String", diff --git a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.html b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.html index 8c6aec2e379..209429f3a06 100644 --- a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.html +++ b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.html @@ -923,6 +923,7 @@

Method Details

"releaseId": "A String", # Optional. ID of the `Release` to roll back to. If this isn't specified, the previous successful `Rollout` to the specified target will be used to determine the `Release`. "rollbackConfig": { # Configs for the Rollback rollout. # Optional. Configs for the rollback `Rollout`. "rollout": { # A `Rollout` resource in the Cloud Deploy API. A `Rollout` contains information around a specific deployment to a `Target`. # Optional. The rollback `Rollout` to create. + "activeRepairAutomationRun": "A String", # Output only. The AutomationRun actively repairing the rollout. "annotations": { # User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations. "a_key": "A String", }, @@ -1152,6 +1153,7 @@

Method Details

{ # The response object from `RollbackTarget`. "rollbackConfig": { # Configs for the Rollback rollout. # The config of the rollback `Rollout` created or will be created. "rollout": { # A `Rollout` resource in the Cloud Deploy API. A `Rollout` contains information around a specific deployment to a `Target`. # Optional. The rollback `Rollout` to create. + "activeRepairAutomationRun": "A String", # Output only. The AutomationRun actively repairing the rollout. "annotations": { # User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations. "a_key": "A String", }, diff --git a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.rollouts.html b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.rollouts.html index 2592f30efd8..c26929a3db6 100644 --- a/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.rollouts.html +++ b/docs/dyn/clouddeploy_v1.projects.locations.deliveryPipelines.releases.rollouts.html @@ -208,6 +208,7 @@

Method Details

The object takes the form of: { # A `Rollout` resource in the Cloud Deploy API. A `Rollout` contains information around a specific deployment to a `Target`. + "activeRepairAutomationRun": "A String", # Output only. The AutomationRun actively repairing the rollout. "annotations": { # User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations. "a_key": "A String", }, @@ -468,6 +469,7 @@

Method Details

An object of the form: { # A `Rollout` resource in the Cloud Deploy API. A `Rollout` contains information around a specific deployment to a `Target`. + "activeRepairAutomationRun": "A String", # Output only. The AutomationRun actively repairing the rollout. "annotations": { # User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations. "a_key": "A String", }, @@ -731,6 +733,7 @@

Method Details

"nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages. "rollouts": [ # The `Rollout` objects. { # A `Rollout` resource in the Cloud Deploy API. A `Rollout` contains information around a specific deployment to a `Target`. + "activeRepairAutomationRun": "A String", # Output only. The AutomationRun actively repairing the rollout. "annotations": { # User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. See https://google.aip.dev/128#annotations for more details such as format and size limitations. "a_key": "A String", }, diff --git a/docs/dyn/clouddeploy_v1.projects.locations.deployPolicies.html b/docs/dyn/clouddeploy_v1.projects.locations.deployPolicies.html index 95da0117bb5..c20ae553ced 100644 --- a/docs/dyn/clouddeploy_v1.projects.locations.deployPolicies.html +++ b/docs/dyn/clouddeploy_v1.projects.locations.deployPolicies.html @@ -110,7 +110,7 @@

Method Details

body: object, The request body. The object takes the form of: -{ # A `DeployPolicy` resource in the Cloud Deploy API. A `DeployPolicy` inhibits manual or automation driven actions within a Delivery Pipeline or Target. +{ # A `DeployPolicy` resource in the Cloud Deploy API. A `DeployPolicy` inhibits manual or automation-driven actions within a Delivery Pipeline or Target. "annotations": { # User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. Annotations must meet the following constraints: * Annotations are key/value pairs. * Valid annotation keys have two segments: an optional prefix and name, separated by a slash (`/`). * The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character (`[a-z0-9A-Z]`) with dashes (`-`), underscores (`_`), dots (`.`), and alphanumerics between. * The prefix is optional. If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots(`.`), not longer than 253 characters in total, followed by a slash (`/`). See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set for more details. "a_key": "A String", }, @@ -131,7 +131,7 @@

Method Details

"invokers": [ # Optional. What invoked the action. If left empty, all invoker types will be restricted. "A String", ], - "timeWindows": { # Time windows within which actions are restricted. # Required. Time window within which actions are restricted. + "timeWindows": { # Time windows within which actions are restricted. See the [documentation](https://cloud.google.com/deploy/docs/deploy-policy#dates_times) for more information on how to configure dates/times. # Required. Time window within which actions are restricted. "oneTimeWindows": [ # Optional. One-time windows within which actions are restricted. { # One-time window within which actions are restricted. For example, blocking actions over New Year's Eve from December 31st at 5pm to January 1st at 9am. "endDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Required. End date. @@ -190,7 +190,7 @@

Method Details

"a_key": "A String", }, }, - "target": { # Contains criteria for selecting Targets. # Optional. Contains attributes about a target. + "target": { # Contains criteria for selecting Targets. This could be used to select targets for a Deploy Policy or for an Automation. # Optional. Contains attributes about a target. "id": "A String", # ID of the `Target`. The value of this field could be one of the following: * The last segment of a target name * "*", all targets in a location "labels": { # Target labels. "a_key": "A String", @@ -288,7 +288,7 @@

Method Details

Returns: An object of the form: - { # A `DeployPolicy` resource in the Cloud Deploy API. A `DeployPolicy` inhibits manual or automation driven actions within a Delivery Pipeline or Target. + { # A `DeployPolicy` resource in the Cloud Deploy API. A `DeployPolicy` inhibits manual or automation-driven actions within a Delivery Pipeline or Target. "annotations": { # User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. Annotations must meet the following constraints: * Annotations are key/value pairs. * Valid annotation keys have two segments: an optional prefix and name, separated by a slash (`/`). * The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character (`[a-z0-9A-Z]`) with dashes (`-`), underscores (`_`), dots (`.`), and alphanumerics between. * The prefix is optional. If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots(`.`), not longer than 253 characters in total, followed by a slash (`/`). See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set for more details. "a_key": "A String", }, @@ -309,7 +309,7 @@

Method Details

"invokers": [ # Optional. What invoked the action. If left empty, all invoker types will be restricted. "A String", ], - "timeWindows": { # Time windows within which actions are restricted. # Required. Time window within which actions are restricted. + "timeWindows": { # Time windows within which actions are restricted. See the [documentation](https://cloud.google.com/deploy/docs/deploy-policy#dates_times) for more information on how to configure dates/times. # Required. Time window within which actions are restricted. "oneTimeWindows": [ # Optional. One-time windows within which actions are restricted. { # One-time window within which actions are restricted. For example, blocking actions over New Year's Eve from December 31st at 5pm to January 1st at 9am. "endDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Required. End date. @@ -368,7 +368,7 @@

Method Details

"a_key": "A String", }, }, - "target": { # Contains criteria for selecting Targets. # Optional. Contains attributes about a target. + "target": { # Contains criteria for selecting Targets. This could be used to select targets for a Deploy Policy or for an Automation. # Optional. Contains attributes about a target. "id": "A String", # ID of the `Target`. The value of this field could be one of the following: * The last segment of a target name * "*", all targets in a location "labels": { # Target labels. "a_key": "A String", @@ -402,7 +402,7 @@

Method Details

{ # The response object from `ListDeployPolicies`. "deployPolicies": [ # The `DeployPolicy` objects. - { # A `DeployPolicy` resource in the Cloud Deploy API. A `DeployPolicy` inhibits manual or automation driven actions within a Delivery Pipeline or Target. + { # A `DeployPolicy` resource in the Cloud Deploy API. A `DeployPolicy` inhibits manual or automation-driven actions within a Delivery Pipeline or Target. "annotations": { # User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. Annotations must meet the following constraints: * Annotations are key/value pairs. * Valid annotation keys have two segments: an optional prefix and name, separated by a slash (`/`). * The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character (`[a-z0-9A-Z]`) with dashes (`-`), underscores (`_`), dots (`.`), and alphanumerics between. * The prefix is optional. If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots(`.`), not longer than 253 characters in total, followed by a slash (`/`). See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set for more details. "a_key": "A String", }, @@ -423,7 +423,7 @@

Method Details

"invokers": [ # Optional. What invoked the action. If left empty, all invoker types will be restricted. "A String", ], - "timeWindows": { # Time windows within which actions are restricted. # Required. Time window within which actions are restricted. + "timeWindows": { # Time windows within which actions are restricted. See the [documentation](https://cloud.google.com/deploy/docs/deploy-policy#dates_times) for more information on how to configure dates/times. # Required. Time window within which actions are restricted. "oneTimeWindows": [ # Optional. One-time windows within which actions are restricted. { # One-time window within which actions are restricted. For example, blocking actions over New Year's Eve from December 31st at 5pm to January 1st at 9am. "endDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Required. End date. @@ -482,7 +482,7 @@

Method Details

"a_key": "A String", }, }, - "target": { # Contains criteria for selecting Targets. # Optional. Contains attributes about a target. + "target": { # Contains criteria for selecting Targets. This could be used to select targets for a Deploy Policy or for an Automation. # Optional. Contains attributes about a target. "id": "A String", # ID of the `Target`. The value of this field could be one of the following: * The last segment of a target name * "*", all targets in a location "labels": { # Target labels. "a_key": "A String", @@ -525,7 +525,7 @@

Method Details

body: object, The request body. The object takes the form of: -{ # A `DeployPolicy` resource in the Cloud Deploy API. A `DeployPolicy` inhibits manual or automation driven actions within a Delivery Pipeline or Target. +{ # A `DeployPolicy` resource in the Cloud Deploy API. A `DeployPolicy` inhibits manual or automation-driven actions within a Delivery Pipeline or Target. "annotations": { # User annotations. These attributes can only be set and used by the user, and not by Cloud Deploy. Annotations must meet the following constraints: * Annotations are key/value pairs. * Valid annotation keys have two segments: an optional prefix and name, separated by a slash (`/`). * The name segment is required and must be 63 characters or less, beginning and ending with an alphanumeric character (`[a-z0-9A-Z]`) with dashes (`-`), underscores (`_`), dots (`.`), and alphanumerics between. * The prefix is optional. If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots(`.`), not longer than 253 characters in total, followed by a slash (`/`). See https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set for more details. "a_key": "A String", }, @@ -546,7 +546,7 @@

Method Details

"invokers": [ # Optional. What invoked the action. If left empty, all invoker types will be restricted. "A String", ], - "timeWindows": { # Time windows within which actions are restricted. # Required. Time window within which actions are restricted. + "timeWindows": { # Time windows within which actions are restricted. See the [documentation](https://cloud.google.com/deploy/docs/deploy-policy#dates_times) for more information on how to configure dates/times. # Required. Time window within which actions are restricted. "oneTimeWindows": [ # Optional. One-time windows within which actions are restricted. { # One-time window within which actions are restricted. For example, blocking actions over New Year's Eve from December 31st at 5pm to January 1st at 9am. "endDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Required. End date. @@ -605,7 +605,7 @@

Method Details

"a_key": "A String", }, }, - "target": { # Contains criteria for selecting Targets. # Optional. Contains attributes about a target. + "target": { # Contains criteria for selecting Targets. This could be used to select targets for a Deploy Policy or for an Automation. # Optional. Contains attributes about a target. "id": "A String", # ID of the `Target`. The value of this field could be one of the following: * The last segment of a target name * "*", all targets in a location "labels": { # Target labels. "a_key": "A String", diff --git a/docs/dyn/cloudidentity_v1beta1.groups.html b/docs/dyn/cloudidentity_v1beta1.groups.html index 431fe14c914..60110d7c2a1 100644 --- a/docs/dyn/cloudidentity_v1beta1.groups.html +++ b/docs/dyn/cloudidentity_v1beta1.groups.html @@ -161,7 +161,7 @@

Method Details

"name": "A String", # Output only. The [resource name](https://cloud.google.com/apis/design/resource_names) of the `Group`. Shall be of the form `groups/{group_id}`. "parent": "A String", # Required. Immutable. The resource name of the entity under which this `Group` resides in the Cloud Identity resource hierarchy. Must be of the form `identitysources/{identity_source}` for external [identity-mapped groups](https://support.google.com/a/answer/9039510) or `customers/{customer_id}` for Google Groups. The `customer_id` must begin with "C" (for example, 'C046psxkn'). [Find your customer ID.] (https://support.google.com/cloudidentity/answer/10070793) "posixGroups": [ # Optional. The POSIX groups associated with the `Group`. - { # POSIX Group definition to represent a group in a POSIX compliant system. + { # POSIX Group definition to represent a group in a POSIX compliant system. Caution: POSIX groups are deprecated. As of September 26, 2024, you can no longer create new POSIX groups. For more information, see https://cloud.google.com/identity/docs/deprecations/posix-groups "gid": "A String", # GID of the POSIX group. "name": "A String", # Name of the POSIX group. "systemId": "A String", # System identifier for which group name and gid apply to. If not specified it will default to empty value. @@ -285,7 +285,7 @@

Method Details

"name": "A String", # Output only. The [resource name](https://cloud.google.com/apis/design/resource_names) of the `Group`. Shall be of the form `groups/{group_id}`. "parent": "A String", # Required. Immutable. The resource name of the entity under which this `Group` resides in the Cloud Identity resource hierarchy. Must be of the form `identitysources/{identity_source}` for external [identity-mapped groups](https://support.google.com/a/answer/9039510) or `customers/{customer_id}` for Google Groups. The `customer_id` must begin with "C" (for example, 'C046psxkn'). [Find your customer ID.] (https://support.google.com/cloudidentity/answer/10070793) "posixGroups": [ # Optional. The POSIX groups associated with the `Group`. - { # POSIX Group definition to represent a group in a POSIX compliant system. + { # POSIX Group definition to represent a group in a POSIX compliant system. Caution: POSIX groups are deprecated. As of September 26, 2024, you can no longer create new POSIX groups. For more information, see https://cloud.google.com/identity/docs/deprecations/posix-groups "gid": "A String", # GID of the POSIX group. "name": "A String", # Name of the POSIX group. "systemId": "A String", # System identifier for which group name and gid apply to. If not specified it will default to empty value. @@ -376,7 +376,7 @@

Method Details

"name": "A String", # Output only. The [resource name](https://cloud.google.com/apis/design/resource_names) of the `Group`. Shall be of the form `groups/{group_id}`. "parent": "A String", # Required. Immutable. The resource name of the entity under which this `Group` resides in the Cloud Identity resource hierarchy. Must be of the form `identitysources/{identity_source}` for external [identity-mapped groups](https://support.google.com/a/answer/9039510) or `customers/{customer_id}` for Google Groups. The `customer_id` must begin with "C" (for example, 'C046psxkn'). [Find your customer ID.] (https://support.google.com/cloudidentity/answer/10070793) "posixGroups": [ # Optional. The POSIX groups associated with the `Group`. - { # POSIX Group definition to represent a group in a POSIX compliant system. + { # POSIX Group definition to represent a group in a POSIX compliant system. Caution: POSIX groups are deprecated. As of September 26, 2024, you can no longer create new POSIX groups. For more information, see https://cloud.google.com/identity/docs/deprecations/posix-groups "gid": "A String", # GID of the POSIX group. "name": "A String", # Name of the POSIX group. "systemId": "A String", # System identifier for which group name and gid apply to. If not specified it will default to empty value. @@ -464,7 +464,7 @@

Method Details

"name": "A String", # Output only. The [resource name](https://cloud.google.com/apis/design/resource_names) of the `Group`. Shall be of the form `groups/{group_id}`. "parent": "A String", # Required. Immutable. The resource name of the entity under which this `Group` resides in the Cloud Identity resource hierarchy. Must be of the form `identitysources/{identity_source}` for external [identity-mapped groups](https://support.google.com/a/answer/9039510) or `customers/{customer_id}` for Google Groups. The `customer_id` must begin with "C" (for example, 'C046psxkn'). [Find your customer ID.] (https://support.google.com/cloudidentity/answer/10070793) "posixGroups": [ # Optional. The POSIX groups associated with the `Group`. - { # POSIX Group definition to represent a group in a POSIX compliant system. + { # POSIX Group definition to represent a group in a POSIX compliant system. Caution: POSIX groups are deprecated. As of September 26, 2024, you can no longer create new POSIX groups. For more information, see https://cloud.google.com/identity/docs/deprecations/posix-groups "gid": "A String", # GID of the POSIX group. "name": "A String", # Name of the POSIX group. "systemId": "A String", # System identifier for which group name and gid apply to. If not specified it will default to empty value. @@ -558,7 +558,7 @@

Method Details

"name": "A String", # Output only. The [resource name](https://cloud.google.com/apis/design/resource_names) of the `Group`. Shall be of the form `groups/{group_id}`. "parent": "A String", # Required. Immutable. The resource name of the entity under which this `Group` resides in the Cloud Identity resource hierarchy. Must be of the form `identitysources/{identity_source}` for external [identity-mapped groups](https://support.google.com/a/answer/9039510) or `customers/{customer_id}` for Google Groups. The `customer_id` must begin with "C" (for example, 'C046psxkn'). [Find your customer ID.] (https://support.google.com/cloudidentity/answer/10070793) "posixGroups": [ # Optional. The POSIX groups associated with the `Group`. - { # POSIX Group definition to represent a group in a POSIX compliant system. + { # POSIX Group definition to represent a group in a POSIX compliant system. Caution: POSIX groups are deprecated. As of September 26, 2024, you can no longer create new POSIX groups. For more information, see https://cloud.google.com/identity/docs/deprecations/posix-groups "gid": "A String", # GID of the POSIX group. "name": "A String", # Name of the POSIX group. "systemId": "A String", # System identifier for which group name and gid apply to. If not specified it will default to empty value. diff --git a/docs/dyn/cloudkms_v1.projects.locations.keyHandles.html b/docs/dyn/cloudkms_v1.projects.locations.keyHandles.html index 7c6b1424787..8d9af360b3a 100644 --- a/docs/dyn/cloudkms_v1.projects.locations.keyHandles.html +++ b/docs/dyn/cloudkms_v1.projects.locations.keyHandles.html @@ -168,7 +168,7 @@

Method Details

Args: parent: string, Required. Name of the resource project and location from which to list KeyHandles, e.g. `projects/{PROJECT_ID}/locations/{LOCATION}`. (required) filter: string, Optional. Filter to apply when listing KeyHandles, e.g. `resource_type_selector="{SERVICE}.googleapis.com/{TYPE}"`. - pageSize: integer, Optional. Optional limit on the number of KeyHandles to include in the response. The service may return fewer than this value. Further KeyHandles can subsequently be obtained by including the ListKeyHandlesResponse.next_page_token in a subsequent request. If unspecified, at most KeyHandles 100 will be returned. + pageSize: integer, Optional. Optional limit on the number of KeyHandles to include in the response. The service may return fewer than this value. Further KeyHandles can subsequently be obtained by including the ListKeyHandlesResponse.next_page_token in a subsequent request. If unspecified, at most 100 KeyHandles will be returned. pageToken: string, Optional. Optional pagination token, returned earlier via ListKeyHandlesResponse.next_page_token. x__xgafv: string, V1 error format. Allowed values diff --git a/docs/dyn/container_v1.projects.locations.clusters.html b/docs/dyn/container_v1.projects.locations.clusters.html index 23aedd0ad16..4f4652a9c3f 100644 --- a/docs/dyn/container_v1.projects.locations.clusters.html +++ b/docs/dyn/container_v1.projects.locations.clusters.html @@ -647,6 +647,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -879,6 +880,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -1151,6 +1153,20 @@

Method Details

"statusMessage": "A String", # Output only. Deprecated. Use conditions instead. Additional information about the current status of this cluster, if available. "subnetwork": "A String", # The name of the Google Compute Engine [subnetwork](https://cloud.google.com/compute/docs/subnetworks) to which the cluster is connected. "tpuIpv4CidrBlock": "A String", # Output only. The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). + "userManagedKeysConfig": { # UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster. # The Custom keys configuration for the cluster. + "aggregationCa": "A String", # The Certificate Authority Service caPool to use for the aggregation CA in this cluster. + "clusterCa": "A String", # The Certificate Authority Service caPool to use for the cluster CA in this cluster. + "controlPlaneDiskEncryptionKey": "A String", # The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. + "etcdApiCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster. + "etcdPeerCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster. + "gkeopsEtcdBackupEncryptionKey": "A String", # Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. + "serviceAccountSigningKeys": [ # The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + "serviceAccountVerificationKeys": [ # The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. 
Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + }, "verticalPodAutoscaling": { # VerticalPodAutoscaling contains global, per-cluster information required by Vertical Pod Autoscaler to automatically adjust the resources of pods controlled by it. # Cluster-level Vertical Pod Autoscaling configuration. "enabled": True or False, # Enables vertical pod autoscaling. }, @@ -1691,6 +1707,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -1923,6 +1940,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -2195,6 +2213,20 @@

Method Details

"statusMessage": "A String", # Output only. Deprecated. Use conditions instead. Additional information about the current status of this cluster, if available. "subnetwork": "A String", # The name of the Google Compute Engine [subnetwork](https://cloud.google.com/compute/docs/subnetworks) to which the cluster is connected. "tpuIpv4CidrBlock": "A String", # Output only. The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). + "userManagedKeysConfig": { # UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster. # The Custom keys configuration for the cluster. + "aggregationCa": "A String", # The Certificate Authority Service caPool to use for the aggregation CA in this cluster. + "clusterCa": "A String", # The Certificate Authority Service caPool to use for the cluster CA in this cluster. + "controlPlaneDiskEncryptionKey": "A String", # The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. + "etcdApiCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster. + "etcdPeerCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster. + "gkeopsEtcdBackupEncryptionKey": "A String", # Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. + "serviceAccountSigningKeys": [ # The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + "serviceAccountVerificationKeys": [ # The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. 
Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + }, "verticalPodAutoscaling": { # VerticalPodAutoscaling contains global, per-cluster information required by Vertical Pod Autoscaler to automatically adjust the resources of pods controlled by it. # Cluster-level Vertical Pod Autoscaling configuration. "enabled": True or False, # Enables vertical pod autoscaling. }, @@ -2638,6 +2670,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -2870,6 +2903,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -3142,6 +3176,20 @@

Method Details

"statusMessage": "A String", # Output only. Deprecated. Use conditions instead. Additional information about the current status of this cluster, if available. "subnetwork": "A String", # The name of the Google Compute Engine [subnetwork](https://cloud.google.com/compute/docs/subnetworks) to which the cluster is connected. "tpuIpv4CidrBlock": "A String", # Output only. The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). + "userManagedKeysConfig": { # UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster. # The Custom keys configuration for the cluster. + "aggregationCa": "A String", # The Certificate Authority Service caPool to use for the aggregation CA in this cluster. + "clusterCa": "A String", # The Certificate Authority Service caPool to use for the cluster CA in this cluster. + "controlPlaneDiskEncryptionKey": "A String", # The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. + "etcdApiCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster. + "etcdPeerCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster. + "gkeopsEtcdBackupEncryptionKey": "A String", # Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. + "serviceAccountSigningKeys": [ # The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + "serviceAccountVerificationKeys": [ # The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. 
Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + }, "verticalPodAutoscaling": { # VerticalPodAutoscaling contains global, per-cluster information required by Vertical Pod Autoscaler to automatically adjust the resources of pods controlled by it. # Cluster-level Vertical Pod Autoscaling configuration. "enabled": True or False, # Enables vertical pod autoscaling. }, @@ -4413,6 +4461,20 @@

Method Details

"A String", ], }, + "userManagedKeysConfig": { # UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster. # The Custom keys configuration for the cluster. + "aggregationCa": "A String", # The Certificate Authority Service caPool to use for the aggregation CA in this cluster. + "clusterCa": "A String", # The Certificate Authority Service caPool to use for the cluster CA in this cluster. + "controlPlaneDiskEncryptionKey": "A String", # The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. + "etcdApiCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster. + "etcdPeerCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster. + "gkeopsEtcdBackupEncryptionKey": "A String", # Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. + "serviceAccountSigningKeys": [ # The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + "serviceAccountVerificationKeys": [ # The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + }, }, "zone": "A String", # Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field. 
} diff --git a/docs/dyn/container_v1.projects.locations.clusters.nodePools.html b/docs/dyn/container_v1.projects.locations.clusters.nodePools.html index 686661fa7e2..ea1295d6730 100644 --- a/docs/dyn/container_v1.projects.locations.clusters.nodePools.html +++ b/docs/dyn/container_v1.projects.locations.clusters.nodePools.html @@ -209,6 +209,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -630,6 +631,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -917,6 +919,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. 
diff --git a/docs/dyn/container_v1.projects.zones.clusters.html b/docs/dyn/container_v1.projects.zones.clusters.html index 62e42b9b857..6b56fc67b72 100644 --- a/docs/dyn/container_v1.projects.zones.clusters.html +++ b/docs/dyn/container_v1.projects.zones.clusters.html @@ -735,6 +735,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -967,6 +968,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -1239,6 +1241,20 @@

Method Details

"statusMessage": "A String", # Output only. Deprecated. Use conditions instead. Additional information about the current status of this cluster, if available. "subnetwork": "A String", # The name of the Google Compute Engine [subnetwork](https://cloud.google.com/compute/docs/subnetworks) to which the cluster is connected. "tpuIpv4CidrBlock": "A String", # Output only. The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). + "userManagedKeysConfig": { # UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster. # The Custom keys configuration for the cluster. + "aggregationCa": "A String", # The Certificate Authority Service caPool to use for the aggregation CA in this cluster. + "clusterCa": "A String", # The Certificate Authority Service caPool to use for the cluster CA in this cluster. + "controlPlaneDiskEncryptionKey": "A String", # The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. + "etcdApiCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster. + "etcdPeerCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster. + "gkeopsEtcdBackupEncryptionKey": "A String", # Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. + "serviceAccountSigningKeys": [ # The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + "serviceAccountVerificationKeys": [ # The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. 
Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + }, "verticalPodAutoscaling": { # VerticalPodAutoscaling contains global, per-cluster information required by Vertical Pod Autoscaler to automatically adjust the resources of pods controlled by it. # Cluster-level Vertical Pod Autoscaling configuration. "enabled": True or False, # Enables vertical pod autoscaling. }, @@ -1779,6 +1795,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -2011,6 +2028,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -2283,6 +2301,20 @@

Method Details

"statusMessage": "A String", # Output only. Deprecated. Use conditions instead. Additional information about the current status of this cluster, if available. "subnetwork": "A String", # The name of the Google Compute Engine [subnetwork](https://cloud.google.com/compute/docs/subnetworks) to which the cluster is connected. "tpuIpv4CidrBlock": "A String", # Output only. The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). + "userManagedKeysConfig": { # UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster. # The Custom keys configuration for the cluster. + "aggregationCa": "A String", # The Certificate Authority Service caPool to use for the aggregation CA in this cluster. + "clusterCa": "A String", # The Certificate Authority Service caPool to use for the cluster CA in this cluster. + "controlPlaneDiskEncryptionKey": "A String", # The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. + "etcdApiCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster. + "etcdPeerCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster. + "gkeopsEtcdBackupEncryptionKey": "A String", # Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. + "serviceAccountSigningKeys": [ # The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + "serviceAccountVerificationKeys": [ # The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. 
Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + }, "verticalPodAutoscaling": { # VerticalPodAutoscaling contains global, per-cluster information required by Vertical Pod Autoscaler to automatically adjust the resources of pods controlled by it. # Cluster-level Vertical Pod Autoscaling configuration. "enabled": True or False, # Enables vertical pod autoscaling. }, @@ -2770,6 +2802,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -3002,6 +3035,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -3274,6 +3308,20 @@

Method Details

"statusMessage": "A String", # Output only. Deprecated. Use conditions instead. Additional information about the current status of this cluster, if available. "subnetwork": "A String", # The name of the Google Compute Engine [subnetwork](https://cloud.google.com/compute/docs/subnetworks) to which the cluster is connected. "tpuIpv4CidrBlock": "A String", # Output only. The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). + "userManagedKeysConfig": { # UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster. # The Custom keys configuration for the cluster. + "aggregationCa": "A String", # The Certificate Authority Service caPool to use for the aggregation CA in this cluster. + "clusterCa": "A String", # The Certificate Authority Service caPool to use for the cluster CA in this cluster. + "controlPlaneDiskEncryptionKey": "A String", # The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. + "etcdApiCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster. + "etcdPeerCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster. + "gkeopsEtcdBackupEncryptionKey": "A String", # Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. + "serviceAccountSigningKeys": [ # The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + "serviceAccountVerificationKeys": [ # The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. 
Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + }, "verticalPodAutoscaling": { # VerticalPodAutoscaling contains global, per-cluster information required by Vertical Pod Autoscaler to automatically adjust the resources of pods controlled by it. # Cluster-level Vertical Pod Autoscaling configuration. "enabled": True or False, # Enables vertical pod autoscaling. }, @@ -4440,6 +4488,20 @@

Method Details

"A String", ], }, + "userManagedKeysConfig": { # UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster. # The Custom keys configuration for the cluster. + "aggregationCa": "A String", # The Certificate Authority Service caPool to use for the aggregation CA in this cluster. + "clusterCa": "A String", # The Certificate Authority Service caPool to use for the cluster CA in this cluster. + "controlPlaneDiskEncryptionKey": "A String", # The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. + "etcdApiCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster. + "etcdPeerCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster. + "gkeopsEtcdBackupEncryptionKey": "A String", # Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. + "serviceAccountSigningKeys": [ # The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + "serviceAccountVerificationKeys": [ # The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + }, }, "zone": "A String", # Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field. 
} diff --git a/docs/dyn/container_v1.projects.zones.clusters.nodePools.html b/docs/dyn/container_v1.projects.zones.clusters.nodePools.html index 9765857df0d..b78ebafff08 100644 --- a/docs/dyn/container_v1.projects.zones.clusters.nodePools.html +++ b/docs/dyn/container_v1.projects.zones.clusters.nodePools.html @@ -274,6 +274,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -695,6 +696,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -982,6 +984,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageLocalSsdConfig": { # EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs. # Parameters for the node ephemeral storage using Local SSDs. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. 
diff --git a/docs/dyn/container_v1beta1.projects.locations.clusters.html b/docs/dyn/container_v1beta1.projects.locations.clusters.html index b11f0e14d18..0d1f3bb8b55 100644 --- a/docs/dyn/container_v1beta1.projects.locations.clusters.html +++ b/docs/dyn/container_v1beta1.projects.locations.clusters.html @@ -672,6 +672,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -926,6 +927,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -1229,6 +1231,20 @@

Method Details

"useServiceNetworking": True or False, # Whether to use service networking for Cloud TPU or not. }, "tpuIpv4CidrBlock": "A String", # Output only. The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). + "userManagedKeysConfig": { # UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster. # The Custom keys configuration for the cluster. + "aggregationCa": "A String", # The Certificate Authority Service caPool to use for the aggregation CA in this cluster. + "clusterCa": "A String", # The Certificate Authority Service caPool to use for the cluster CA in this cluster. + "controlPlaneDiskEncryptionKey": "A String", # The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. + "etcdApiCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster. + "etcdPeerCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster. + "gkeopsEtcdBackupEncryptionKey": "A String", # Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. + "serviceAccountSigningKeys": [ # The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + "serviceAccountVerificationKeys": [ # The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. 
Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + }, "verticalPodAutoscaling": { # VerticalPodAutoscaling contains global, per-cluster information required by Vertical Pod Autoscaler to automatically adjust the resources of pods controlled by it. # Cluster-level Vertical Pod Autoscaling configuration. "enabled": True or False, # Enables vertical pod autoscaling. }, @@ -1802,6 +1818,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -2056,6 +2073,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -2359,6 +2377,20 @@

Method Details

"useServiceNetworking": True or False, # Whether to use service networking for Cloud TPU or not. }, "tpuIpv4CidrBlock": "A String", # Output only. The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). + "userManagedKeysConfig": { # UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster. # The Custom keys configuration for the cluster. + "aggregationCa": "A String", # The Certificate Authority Service caPool to use for the aggregation CA in this cluster. + "clusterCa": "A String", # The Certificate Authority Service caPool to use for the cluster CA in this cluster. + "controlPlaneDiskEncryptionKey": "A String", # The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. + "etcdApiCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster. + "etcdPeerCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster. + "gkeopsEtcdBackupEncryptionKey": "A String", # Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. + "serviceAccountSigningKeys": [ # The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + "serviceAccountVerificationKeys": [ # The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. 
Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + }, "verticalPodAutoscaling": { # VerticalPodAutoscaling contains global, per-cluster information required by Vertical Pod Autoscaler to automatically adjust the resources of pods controlled by it. # Cluster-level Vertical Pod Autoscaling configuration. "enabled": True or False, # Enables vertical pod autoscaling. }, @@ -2835,6 +2867,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -3089,6 +3122,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -3392,6 +3426,20 @@

Method Details

"useServiceNetworking": True or False, # Whether to use service networking for Cloud TPU or not. }, "tpuIpv4CidrBlock": "A String", # Output only. The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). + "userManagedKeysConfig": { # UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster. # The Custom keys configuration for the cluster. + "aggregationCa": "A String", # The Certificate Authority Service caPool to use for the aggregation CA in this cluster. + "clusterCa": "A String", # The Certificate Authority Service caPool to use for the cluster CA in this cluster. + "controlPlaneDiskEncryptionKey": "A String", # The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. + "etcdApiCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster. + "etcdPeerCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster. + "gkeopsEtcdBackupEncryptionKey": "A String", # Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. + "serviceAccountSigningKeys": [ # The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + "serviceAccountVerificationKeys": [ # The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. 
Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + }, "verticalPodAutoscaling": { # VerticalPodAutoscaling contains global, per-cluster information required by Vertical Pod Autoscaler to automatically adjust the resources of pods controlled by it. # Cluster-level Vertical Pod Autoscaling configuration. "enabled": True or False, # Enables vertical pod autoscaling. }, @@ -4739,6 +4787,20 @@

Method Details

"A String", ], }, + "userManagedKeysConfig": { # UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster. # The Custom keys configuration for the cluster. + "aggregationCa": "A String", # The Certificate Authority Service caPool to use for the aggregation CA in this cluster. + "clusterCa": "A String", # The Certificate Authority Service caPool to use for the cluster CA in this cluster. + "controlPlaneDiskEncryptionKey": "A String", # The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. + "etcdApiCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster. + "etcdPeerCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster. + "gkeopsEtcdBackupEncryptionKey": "A String", # Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. + "serviceAccountSigningKeys": [ # The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + "serviceAccountVerificationKeys": [ # The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + }, }, "zone": "A String", # Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field. 
} diff --git a/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html b/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html index f916388f798..888172d2e1f 100644 --- a/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html +++ b/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html @@ -210,6 +210,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -648,6 +649,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -952,6 +954,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. 
diff --git a/docs/dyn/container_v1beta1.projects.zones.clusters.html b/docs/dyn/container_v1beta1.projects.zones.clusters.html index 550d1ceea36..74456b54ac5 100644 --- a/docs/dyn/container_v1beta1.projects.zones.clusters.html +++ b/docs/dyn/container_v1beta1.projects.zones.clusters.html @@ -767,6 +767,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -1021,6 +1022,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -1324,6 +1326,20 @@

Method Details

"useServiceNetworking": True or False, # Whether to use service networking for Cloud TPU or not. }, "tpuIpv4CidrBlock": "A String", # Output only. The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). + "userManagedKeysConfig": { # UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster. # The Custom keys configuration for the cluster. + "aggregationCa": "A String", # The Certificate Authority Service caPool to use for the aggregation CA in this cluster. + "clusterCa": "A String", # The Certificate Authority Service caPool to use for the cluster CA in this cluster. + "controlPlaneDiskEncryptionKey": "A String", # The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. + "etcdApiCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster. + "etcdPeerCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster. + "gkeopsEtcdBackupEncryptionKey": "A String", # Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. + "serviceAccountSigningKeys": [ # The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + "serviceAccountVerificationKeys": [ # The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. 
Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + }, "verticalPodAutoscaling": { # VerticalPodAutoscaling contains global, per-cluster information required by Vertical Pod Autoscaler to automatically adjust the resources of pods controlled by it. # Cluster-level Vertical Pod Autoscaling configuration. "enabled": True or False, # Enables vertical pod autoscaling. }, @@ -1897,6 +1913,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -2151,6 +2168,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -2454,6 +2472,20 @@

Method Details

"useServiceNetworking": True or False, # Whether to use service networking for Cloud TPU or not. }, "tpuIpv4CidrBlock": "A String", # Output only. The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). + "userManagedKeysConfig": { # UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster. # The Custom keys configuration for the cluster. + "aggregationCa": "A String", # The Certificate Authority Service caPool to use for the aggregation CA in this cluster. + "clusterCa": "A String", # The Certificate Authority Service caPool to use for the cluster CA in this cluster. + "controlPlaneDiskEncryptionKey": "A String", # The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. + "etcdApiCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster. + "etcdPeerCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster. + "gkeopsEtcdBackupEncryptionKey": "A String", # Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. + "serviceAccountSigningKeys": [ # The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + "serviceAccountVerificationKeys": [ # The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. 
Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + }, "verticalPodAutoscaling": { # VerticalPodAutoscaling contains global, per-cluster information required by Vertical Pod Autoscaler to automatically adjust the resources of pods controlled by it. # Cluster-level Vertical Pod Autoscaling configuration. "enabled": True or False, # Enables vertical pod autoscaling. }, @@ -2974,6 +3006,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -3228,6 +3261,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -3531,6 +3565,20 @@

Method Details

"useServiceNetworking": True or False, # Whether to use service networking for Cloud TPU or not. }, "tpuIpv4CidrBlock": "A String", # Output only. The IP address range of the Cloud TPUs in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). + "userManagedKeysConfig": { # UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster. # The Custom keys configuration for the cluster. + "aggregationCa": "A String", # The Certificate Authority Service caPool to use for the aggregation CA in this cluster. + "clusterCa": "A String", # The Certificate Authority Service caPool to use for the cluster CA in this cluster. + "controlPlaneDiskEncryptionKey": "A String", # The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. + "etcdApiCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster. + "etcdPeerCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster. + "gkeopsEtcdBackupEncryptionKey": "A String", # Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. + "serviceAccountSigningKeys": [ # The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + "serviceAccountVerificationKeys": [ # The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. 
Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + }, "verticalPodAutoscaling": { # VerticalPodAutoscaling contains global, per-cluster information required by Vertical Pod Autoscaler to automatically adjust the resources of pods controlled by it. # Cluster-level Vertical Pod Autoscaling configuration. "enabled": True or False, # Enables vertical pod autoscaling. }, @@ -4766,6 +4814,20 @@

Method Details

"A String", ], }, + "userManagedKeysConfig": { # UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster. # The Custom keys configuration for the cluster. + "aggregationCa": "A String", # The Certificate Authority Service caPool to use for the aggregation CA in this cluster. + "clusterCa": "A String", # The Certificate Authority Service caPool to use for the cluster CA in this cluster. + "controlPlaneDiskEncryptionKey": "A String", # The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes. + "etcdApiCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster. + "etcdPeerCa": "A String", # Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster. + "gkeopsEtcdBackupEncryptionKey": "A String", # Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups. + "serviceAccountSigningKeys": [ # The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + "serviceAccountVerificationKeys": [ # The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}` + "A String", + ], + }, }, "zone": "A String", # Required. Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field. 
} diff --git a/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html b/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html index f84d9d9d2c4..6041ea6ed92 100644 --- a/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html +++ b/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html @@ -275,6 +275,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -713,6 +714,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. @@ -1017,6 +1019,7 @@

Method Details

}, "diskSizeGb": 42, # Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB. "diskType": "A String", # Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard' + "effectiveCgroupMode": "A String", # Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version. "enableConfidentialStorage": True or False, # Optional. Reserved for future use. "ephemeralStorageConfig": { # EphemeralStorageConfig contains configuration for the ephemeral storage filesystem. # Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. "localSsdCount": 42, # Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. The limit for this value is dependent upon the maximum number of disk available on a machine per zone. See: https://cloud.google.com/compute/docs/disks/local-ssd for more information. A zero (or unset) value has different meanings depending on machine type being used: 1. For pre-Gen3 machines, which support flexible numbers of local ssds, zero (or unset) means to disable using local SSDs as ephemeral storage. 2. For Gen3 machines which dictate a specific number of local ssds, zero (or unset) means to use the default number of local ssds that goes with that machine type. For example, for a c3-standard-8-lssd machine, 2 local ssds would be provisioned. For c3-standard-8 (which doesn't support local ssds), 0 will be provisioned. See https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds for more info. 
diff --git a/docs/dyn/datamigration_v1.projects.locations.migrationJobs.html b/docs/dyn/datamigration_v1.projects.locations.migrationJobs.html index 34332a9c2a7..e20e12db42b 100644 --- a/docs/dyn/datamigration_v1.projects.locations.migrationJobs.html +++ b/docs/dyn/datamigration_v1.projects.locations.migrationJobs.html @@ -190,6 +190,28 @@

Method Details

"a_key": "A String", }, "name": "A String", # The name (URI) of this migration job resource, in the form of: projects/{project}/locations/{location}/migrationJobs/{migrationJob}. + "oracleToPostgresConfig": { # Configuration for heterogeneous **Oracle to Cloud SQL for PostgreSQL** and **Oracle to AlloyDB for PostgreSQL** migrations. # Configuration for heterogeneous **Oracle to Cloud SQL for PostgreSQL** and **Oracle to AlloyDB for PostgreSQL** migrations. + "oracleSourceConfig": { # Configuration for Oracle as a source in a migration. # Optional. Configuration for Oracle source. + "binaryLogParser": { # Configuration to use Binary Log Parser CDC technique. # Use Binary Log Parser. + "logFileDirectories": { # Configuration to specify the Oracle directories to access the log files. # Use Oracle directories. + "archivedLogDirectory": "A String", # Required. Oracle directory for archived logs. + "onlineLogDirectory": "A String", # Required. Oracle directory for online logs. + }, + "oracleAsmLogFileAccess": { # Configuration to use Oracle ASM to access the log files. # Use Oracle ASM. + }, + }, + "cdcStartPosition": "A String", # Optional. The schema change number (SCN) to start CDC data migration from. + "logMiner": { # Configuration to use LogMiner CDC method. # Use LogMiner. + }, + "maxConcurrentCdcConnections": 42, # Optional. Maximum number of connections Database Migration Service will open to the source for CDC phase. + "maxConcurrentFullDumpConnections": 42, # Optional. Maximum number of connections Database Migration Service will open to the source for full dump phase. + "skipFullDump": True or False, # Optional. Whether to skip full dump or not. + }, + "postgresDestinationConfig": { # Configuration for Postgres as a destination in a migration. # Optional. Configuration for Postgres destination. + "maxConcurrentConnections": 42, # Optional. Maximum number of connections Database Migration Service will open to the destination for data migration. 
+ "transactionTimeout": "A String", # Optional. Timeout for data migration transactions. + }, + }, "performanceConfig": { # Performance configuration definition. # Optional. Data dump parallelism settings used by the migration. "dumpParallelLevel": "A String", # Initial dump parallelism level. }, @@ -456,6 +478,28 @@

Method Details

"a_key": "A String", }, "name": "A String", # The name (URI) of this migration job resource, in the form of: projects/{project}/locations/{location}/migrationJobs/{migrationJob}. + "oracleToPostgresConfig": { # Configuration for heterogeneous **Oracle to Cloud SQL for PostgreSQL** and **Oracle to AlloyDB for PostgreSQL** migrations. # Configuration for heterogeneous **Oracle to Cloud SQL for PostgreSQL** and **Oracle to AlloyDB for PostgreSQL** migrations. + "oracleSourceConfig": { # Configuration for Oracle as a source in a migration. # Optional. Configuration for Oracle source. + "binaryLogParser": { # Configuration to use Binary Log Parser CDC technique. # Use Binary Log Parser. + "logFileDirectories": { # Configuration to specify the Oracle directories to access the log files. # Use Oracle directories. + "archivedLogDirectory": "A String", # Required. Oracle directory for archived logs. + "onlineLogDirectory": "A String", # Required. Oracle directory for online logs. + }, + "oracleAsmLogFileAccess": { # Configuration to use Oracle ASM to access the log files. # Use Oracle ASM. + }, + }, + "cdcStartPosition": "A String", # Optional. The schema change number (SCN) to start CDC data migration from. + "logMiner": { # Configuration to use LogMiner CDC method. # Use LogMiner. + }, + "maxConcurrentCdcConnections": 42, # Optional. Maximum number of connections Database Migration Service will open to the source for CDC phase. + "maxConcurrentFullDumpConnections": 42, # Optional. Maximum number of connections Database Migration Service will open to the source for full dump phase. + "skipFullDump": True or False, # Optional. Whether to skip full dump or not. + }, + "postgresDestinationConfig": { # Configuration for Postgres as a destination in a migration. # Optional. Configuration for Postgres destination. + "maxConcurrentConnections": 42, # Optional. Maximum number of connections Database Migration Service will open to the destination for data migration. 
+ "transactionTimeout": "A String", # Optional. Timeout for data migration transactions. + }, + }, "performanceConfig": { # Performance configuration definition. # Optional. Data dump parallelism settings used by the migration. "dumpParallelLevel": "A String", # Initial dump parallelism level. }, @@ -604,6 +648,28 @@

Method Details

"a_key": "A String", }, "name": "A String", # The name (URI) of this migration job resource, in the form of: projects/{project}/locations/{location}/migrationJobs/{migrationJob}. + "oracleToPostgresConfig": { # Configuration for heterogeneous **Oracle to Cloud SQL for PostgreSQL** and **Oracle to AlloyDB for PostgreSQL** migrations. # Configuration for heterogeneous **Oracle to Cloud SQL for PostgreSQL** and **Oracle to AlloyDB for PostgreSQL** migrations. + "oracleSourceConfig": { # Configuration for Oracle as a source in a migration. # Optional. Configuration for Oracle source. + "binaryLogParser": { # Configuration to use Binary Log Parser CDC technique. # Use Binary Log Parser. + "logFileDirectories": { # Configuration to specify the Oracle directories to access the log files. # Use Oracle directories. + "archivedLogDirectory": "A String", # Required. Oracle directory for archived logs. + "onlineLogDirectory": "A String", # Required. Oracle directory for online logs. + }, + "oracleAsmLogFileAccess": { # Configuration to use Oracle ASM to access the log files. # Use Oracle ASM. + }, + }, + "cdcStartPosition": "A String", # Optional. The schema change number (SCN) to start CDC data migration from. + "logMiner": { # Configuration to use LogMiner CDC method. # Use LogMiner. + }, + "maxConcurrentCdcConnections": 42, # Optional. Maximum number of connections Database Migration Service will open to the source for CDC phase. + "maxConcurrentFullDumpConnections": 42, # Optional. Maximum number of connections Database Migration Service will open to the source for full dump phase. + "skipFullDump": True or False, # Optional. Whether to skip full dump or not. + }, + "postgresDestinationConfig": { # Configuration for Postgres as a destination in a migration. # Optional. Configuration for Postgres destination. + "maxConcurrentConnections": 42, # Optional. Maximum number of connections Database Migration Service will open to the destination for data migration. 
+ "transactionTimeout": "A String", # Optional. Timeout for data migration transactions. + }, + }, "performanceConfig": { # Performance configuration definition. # Optional. Data dump parallelism settings used by the migration. "dumpParallelLevel": "A String", # Initial dump parallelism level. }, @@ -713,6 +779,28 @@

Method Details

"a_key": "A String", }, "name": "A String", # The name (URI) of this migration job resource, in the form of: projects/{project}/locations/{location}/migrationJobs/{migrationJob}. + "oracleToPostgresConfig": { # Configuration for heterogeneous **Oracle to Cloud SQL for PostgreSQL** and **Oracle to AlloyDB for PostgreSQL** migrations. # Configuration for heterogeneous **Oracle to Cloud SQL for PostgreSQL** and **Oracle to AlloyDB for PostgreSQL** migrations. + "oracleSourceConfig": { # Configuration for Oracle as a source in a migration. # Optional. Configuration for Oracle source. + "binaryLogParser": { # Configuration to use Binary Log Parser CDC technique. # Use Binary Log Parser. + "logFileDirectories": { # Configuration to specify the Oracle directories to access the log files. # Use Oracle directories. + "archivedLogDirectory": "A String", # Required. Oracle directory for archived logs. + "onlineLogDirectory": "A String", # Required. Oracle directory for online logs. + }, + "oracleAsmLogFileAccess": { # Configuration to use Oracle ASM to access the log files. # Use Oracle ASM. + }, + }, + "cdcStartPosition": "A String", # Optional. The schema change number (SCN) to start CDC data migration from. + "logMiner": { # Configuration to use LogMiner CDC method. # Use LogMiner. + }, + "maxConcurrentCdcConnections": 42, # Optional. Maximum number of connections Database Migration Service will open to the source for CDC phase. + "maxConcurrentFullDumpConnections": 42, # Optional. Maximum number of connections Database Migration Service will open to the source for full dump phase. + "skipFullDump": True or False, # Optional. Whether to skip full dump or not. + }, + "postgresDestinationConfig": { # Configuration for Postgres as a destination in a migration. # Optional. Configuration for Postgres destination. + "maxConcurrentConnections": 42, # Optional. Maximum number of connections Database Migration Service will open to the destination for data migration. 
+ "transactionTimeout": "A String", # Optional. Timeout for data migration transactions. + }, + }, "performanceConfig": { # Performance configuration definition. # Optional. Data dump parallelism settings used by the migration. "dumpParallelLevel": "A String", # Initial dump parallelism level. }, @@ -1156,6 +1244,28 @@

Method Details

"a_key": "A String", }, "name": "A String", # The name (URI) of this migration job resource, in the form of: projects/{project}/locations/{location}/migrationJobs/{migrationJob}. + "oracleToPostgresConfig": { # Configuration for heterogeneous **Oracle to Cloud SQL for PostgreSQL** and **Oracle to AlloyDB for PostgreSQL** migrations. # Configuration for heterogeneous **Oracle to Cloud SQL for PostgreSQL** and **Oracle to AlloyDB for PostgreSQL** migrations. + "oracleSourceConfig": { # Configuration for Oracle as a source in a migration. # Optional. Configuration for Oracle source. + "binaryLogParser": { # Configuration to use Binary Log Parser CDC technique. # Use Binary Log Parser. + "logFileDirectories": { # Configuration to specify the Oracle directories to access the log files. # Use Oracle directories. + "archivedLogDirectory": "A String", # Required. Oracle directory for archived logs. + "onlineLogDirectory": "A String", # Required. Oracle directory for online logs. + }, + "oracleAsmLogFileAccess": { # Configuration to use Oracle ASM to access the log files. # Use Oracle ASM. + }, + }, + "cdcStartPosition": "A String", # Optional. The schema change number (SCN) to start CDC data migration from. + "logMiner": { # Configuration to use LogMiner CDC method. # Use LogMiner. + }, + "maxConcurrentCdcConnections": 42, # Optional. Maximum number of connections Database Migration Service will open to the source for CDC phase. + "maxConcurrentFullDumpConnections": 42, # Optional. Maximum number of connections Database Migration Service will open to the source for full dump phase. + "skipFullDump": True or False, # Optional. Whether to skip full dump or not. + }, + "postgresDestinationConfig": { # Configuration for Postgres as a destination in a migration. # Optional. Configuration for Postgres destination. + "maxConcurrentConnections": 42, # Optional. Maximum number of connections Database Migration Service will open to the destination for data migration. 
+ "transactionTimeout": "A String", # Optional. Timeout for data migration transactions. + }, + }, "performanceConfig": { # Performance configuration definition. # Optional. Data dump parallelism settings used by the migration. "dumpParallelLevel": "A String", # Initial dump parallelism level. }, diff --git a/docs/dyn/dataplex_v1.projects.locations.entryGroups.entries.html b/docs/dyn/dataplex_v1.projects.locations.entryGroups.entries.html index 5eb35420dcc..6b06ae4f575 100644 --- a/docs/dyn/dataplex_v1.projects.locations.entryGroups.entries.html +++ b/docs/dyn/dataplex_v1.projects.locations.entryGroups.entries.html @@ -464,7 +464,7 @@

Method Details

} allowMissing: boolean, Optional. If set to true and the entry doesn't exist, the service will create it. - aspectKeys: string, Optional. The map keys of the Aspects which the service should modify. It supports the following syntaxes: - matches an aspect of the given type and empty path. @path - matches an aspect of the given type and specified path. * - matches aspects of the given type for all paths. *@path - matches aspects of all types on the given path.The service will not remove existing aspects matching the syntax unless delete_missing_aspects is set to true.If this field is left empty, the service treats it as specifying exactly those Aspects present in the request. (repeated) + aspectKeys: string, Optional. The map keys of the Aspects which the service should modify. It supports the following syntaxes: - matches an aspect of the given type and empty path. @path - matches an aspect of the given type and specified path. For example, to attach an aspect to a field that is specified by the schema aspect, the path should have the format Schema.. * - matches aspects of the given type for all paths. *@path - matches aspects of all types on the given path.The service will not remove existing aspects matching the syntax unless delete_missing_aspects is set to true.If this field is left empty, the service treats it as specifying exactly those Aspects present in the request. (repeated) deleteMissingAspects: boolean, Optional. If set to true and the aspect_keys specify aspect ranges, the service deletes any existing aspects from that range that weren't provided in the request. updateMask: string, Optional. Mask of fields to update. To update Aspects, the update_mask must contain the value "aspects".If the update_mask is empty, the service will update all modifiable fields present in the request. x__xgafv: string, V1 error format. 
diff --git a/docs/dyn/dataproc_v1.projects.locations.batches.html b/docs/dyn/dataproc_v1.projects.locations.batches.html index 7c858c90a2e..314e3ac6179 100644 --- a/docs/dyn/dataproc_v1.projects.locations.batches.html +++ b/docs/dyn/dataproc_v1.projects.locations.batches.html @@ -74,6 +74,11 @@

Cloud Dataproc API . projects . locations . batches

Instance Methods

+

+ sparkApplications() +

+

Returns the sparkApplications Resource.

+

analyze(name, body=None, x__xgafv=None)

Analyze a Batch for possible recommendations and insights.

diff --git a/docs/dyn/dataproc_v1.projects.locations.batches.sparkApplications.html b/docs/dyn/dataproc_v1.projects.locations.batches.sparkApplications.html new file mode 100644 index 00000000000..50e91bb0907 --- /dev/null +++ b/docs/dyn/dataproc_v1.projects.locations.batches.sparkApplications.html @@ -0,0 +1,4334 @@ + + + +

Cloud Dataproc API . projects . locations . batches . sparkApplications

+

Instance Methods

+

+ access(name, parent=None, x__xgafv=None)

+

Obtain high level information corresponding to a single Spark Application.

+

+ accessEnvironmentInfo(name, parent=None, x__xgafv=None)

+

Obtain environment details for a Spark Application

+

+ accessJob(name, jobId=None, parent=None, x__xgafv=None)

+

Obtain data corresponding to a spark job for a Spark Application.

+

+ accessSqlPlan(name, executionId=None, parent=None, x__xgafv=None)

+

Obtain Spark Plan Graph for a Spark Application SQL execution. Limits the number of clusters returned as part of the graph to 10000.

+

+ accessSqlQuery(name, details=None, executionId=None, parent=None, planDescription=None, x__xgafv=None)

+

Obtain data corresponding to a particular SQL Query for a Spark Application.

+

+ accessStageAttempt(name, parent=None, stageAttemptId=None, stageId=None, summaryMetricsMask=None, x__xgafv=None)

+

Obtain data corresponding to a spark stage attempt for a Spark Application.

+

+ accessStageRddGraph(name, parent=None, stageId=None, x__xgafv=None)

+

Obtain RDD operation graph for a Spark Application Stage. Limits the number of clusters returned as part of the graph to 10000.

+

+ close()

+

Close httplib2 connections.

+

+ search(parent, applicationStatus=None, maxEndTime=None, maxTime=None, minEndTime=None, minTime=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Obtain high level information and list of Spark Applications corresponding to a batch

+

+ searchExecutorStageSummary(name, pageSize=None, pageToken=None, parent=None, stageAttemptId=None, stageId=None, x__xgafv=None)

+

Obtain executor summary with respect to a spark stage attempt.

+

+ searchExecutorStageSummary_next()

+

Retrieves the next page of results.

+

+ searchExecutors(name, executorStatus=None, pageSize=None, pageToken=None, parent=None, x__xgafv=None)

+

Obtain data corresponding to executors for a Spark Application.

+

+ searchExecutors_next()

+

Retrieves the next page of results.

+

+ searchJobs(name, jobStatus=None, pageSize=None, pageToken=None, parent=None, x__xgafv=None)

+

Obtain list of spark jobs corresponding to a Spark Application.

+

+ searchJobs_next()

+

Retrieves the next page of results.

+

+ searchSqlQueries(name, details=None, pageSize=None, pageToken=None, parent=None, planDescription=None, x__xgafv=None)

+

Obtain data corresponding to SQL Queries for a Spark Application.

+

+ searchSqlQueries_next()

+

Retrieves the next page of results.

+

+ searchStageAttemptTasks(name, pageSize=None, pageToken=None, parent=None, sortRuntime=None, stageAttemptId=None, stageId=None, taskStatus=None, x__xgafv=None)

+

Obtain data corresponding to tasks for a spark stage attempt for a Spark Application.

+

+ searchStageAttemptTasks_next()

+

Retrieves the next page of results.

+

+ searchStageAttempts(name, pageSize=None, pageToken=None, parent=None, stageId=None, summaryMetricsMask=None, x__xgafv=None)

+

Obtain data corresponding to a spark stage attempts for a Spark Application.

+

+ searchStageAttempts_next()

+

Retrieves the next page of results.

+

+ searchStages(name, pageSize=None, pageToken=None, parent=None, stageStatus=None, summaryMetricsMask=None, x__xgafv=None)

+

Obtain data corresponding to stages for a Spark Application.

+

+ searchStages_next()

+

Retrieves the next page of results.

+

+ search_next()

+

Retrieves the next page of results.

+

+ summarizeExecutors(name, parent=None, x__xgafv=None)

+

Obtain summary of Executor Summary for a Spark Application

+

+ summarizeJobs(name, parent=None, x__xgafv=None)

+

Obtain summary of Jobs for a Spark Application

+

+ summarizeStageAttemptTasks(name, parent=None, stageAttemptId=None, stageId=None, x__xgafv=None)

+

Obtain summary of Tasks for a Spark Application Stage Attempt

+

+ summarizeStages(name, parent=None, x__xgafv=None)

+

Obtain summary of Stages for a Spark Application

+

+ write(name, body=None, x__xgafv=None)

+

Write wrapper objects from dataplane to spanner

+

Method Details

+
+ access(name, parent=None, x__xgafv=None) +
Obtain high level information corresponding to a single Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  parent: string, Required. Parent (Batch) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A summary of Spark Application
+  "application": { # High level information corresponding to an application. # Output only. High level information corresponding to an application.
+    "applicationContextIngestionStatus": "A String",
+    "applicationId": "A String",
+    "attempts": [
+      { # Specific attempt of an application.
+        "appSparkVersion": "A String",
+        "attemptId": "A String",
+        "completed": True or False,
+        "durationMillis": "A String",
+        "endTime": "A String",
+        "lastUpdated": "A String",
+        "sparkUser": "A String",
+        "startTime": "A String",
+      },
+    ],
+    "coresGranted": 42,
+    "coresPerExecutor": 42,
+    "maxCores": 42,
+    "memoryPerExecutorMb": 42,
+    "name": "A String",
+    "quantileDataStatus": "A String",
+  },
+}
+
+ +
+ accessEnvironmentInfo(name, parent=None, x__xgafv=None) +
Obtain environment details for a Spark Application
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  parent: string, Required. Parent (Batch) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Environment details of a Saprk Application.
+  "applicationEnvironmentInfo": { # Details about the Environment that the application is running in. # Details about the Environment that the application is running in.
+    "classpathEntries": {
+      "a_key": "A String",
+    },
+    "hadoopProperties": {
+      "a_key": "A String",
+    },
+    "metricsProperties": {
+      "a_key": "A String",
+    },
+    "resourceProfiles": [
+      { # Resource profile that contains information about all the resources required by executors and tasks.
+        "executorResources": {
+          "a_key": { # Resources used per executor used by the application.
+            "amount": "A String",
+            "discoveryScript": "A String",
+            "resourceName": "A String",
+            "vendor": "A String",
+          },
+        },
+        "resourceProfileId": 42,
+        "taskResources": {
+          "a_key": { # Resources used per task created by the application.
+            "amount": 3.14,
+            "resourceName": "A String",
+          },
+        },
+      },
+    ],
+    "runtime": {
+      "javaHome": "A String",
+      "javaVersion": "A String",
+      "scalaVersion": "A String",
+    },
+    "sparkProperties": {
+      "a_key": "A String",
+    },
+    "systemProperties": {
+      "a_key": "A String",
+    },
+  },
+}
+
+ +
+ accessJob(name, jobId=None, parent=None, x__xgafv=None) +
Obtain data corresponding to a spark job for a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  jobId: string, Required. Job ID to fetch data for.
+  parent: string, Required. Parent (Batch) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Details of a particular job associated with Spark Application
+  "jobData": { # Data corresponding to a spark job. # Output only. Data corresponding to a spark job.
+    "completionTime": "A String",
+    "description": "A String",
+    "jobGroup": "A String",
+    "jobId": "A String",
+    "killTasksSummary": {
+      "a_key": 42,
+    },
+    "name": "A String",
+    "numActiveStages": 42,
+    "numActiveTasks": 42,
+    "numCompletedIndices": 42,
+    "numCompletedStages": 42,
+    "numCompletedTasks": 42,
+    "numFailedStages": 42,
+    "numFailedTasks": 42,
+    "numKilledTasks": 42,
+    "numSkippedStages": 42,
+    "numSkippedTasks": 42,
+    "numTasks": 42,
+    "skippedStages": [
+      42,
+    ],
+    "sqlExecutionId": "A String",
+    "stageIds": [
+      "A String",
+    ],
+    "status": "A String",
+    "submissionTime": "A String",
+  },
+}
+
+ +
+ accessSqlPlan(name, executionId=None, parent=None, x__xgafv=None) +
Obtain Spark Plan Graph for a Spark Application SQL execution. Limits the number of clusters returned as part of the graph to 10000.
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  executionId: string, Required. Execution ID
+  parent: string, Required. Parent (Batch) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # SparkPlanGraph for a Spark Application execution limited to maximum 10000 clusters.
+  "sparkPlanGraph": { # A graph used for storing information of an executionPlan of DataFrame. # SparkPlanGraph for a Spark Application execution.
+    "edges": [
+      { # Represents a directed edge in the spark plan tree from child to parent.
+        "fromId": "A String",
+        "toId": "A String",
+      },
+    ],
+    "executionId": "A String",
+    "nodes": [
+      { # Wrapper user to represent either a node or a cluster.
+        "cluster": { # Represents a tree of spark plan.
+          "desc": "A String",
+          "metrics": [
+            { # Metrics related to SQL execution.
+              "accumulatorId": "A String",
+              "metricType": "A String",
+              "name": "A String",
+            },
+          ],
+          "name": "A String",
+          "nodes": [
+            # Object with schema name: SparkPlanGraphNodeWrapper
+          ],
+          "sparkPlanGraphClusterId": "A String",
+        },
+        "node": { # Represents a node in the spark plan tree.
+          "desc": "A String",
+          "metrics": [
+            { # Metrics related to SQL execution.
+              "accumulatorId": "A String",
+              "metricType": "A String",
+              "name": "A String",
+            },
+          ],
+          "name": "A String",
+          "sparkPlanGraphNodeId": "A String",
+        },
+      },
+    ],
+  },
+}
+
+ +
+ accessSqlQuery(name, details=None, executionId=None, parent=None, planDescription=None, x__xgafv=None) +
Obtain data corresponding to a particular SQL Query for a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  details: boolean, Optional. Lists/ hides details of Spark plan nodes. True is set to list and false to hide.
+  executionId: string, Required. Execution ID
+  parent: string, Required. Parent (Batch) resource reference.
+  planDescription: boolean, Optional. Enables/ disables physical plan description on demand
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Details of a query for a Spark Application
+  "executionData": { # SQL Execution Data # SQL Execution Data
+    "completionTime": "A String",
+    "description": "A String",
+    "details": "A String",
+    "errorMessage": "A String",
+    "executionId": "A String",
+    "jobs": {
+      "a_key": "A String",
+    },
+    "metricValues": {
+      "a_key": "A String",
+    },
+    "metricValuesIsNull": True or False,
+    "metrics": [
+      { # Metrics related to SQL execution.
+        "accumulatorId": "A String",
+        "metricType": "A String",
+        "name": "A String",
+      },
+    ],
+    "modifiedConfigs": {
+      "a_key": "A String",
+    },
+    "physicalPlanDescription": "A String",
+    "rootExecutionId": "A String",
+    "stages": [
+      "A String",
+    ],
+    "submissionTime": "A String",
+  },
+}
+
+ +
+ accessStageAttempt(name, parent=None, stageAttemptId=None, stageId=None, summaryMetricsMask=None, x__xgafv=None) +
Obtain data corresponding to a spark stage attempt for a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  parent: string, Required. Parent (Batch) resource reference.
+  stageAttemptId: integer, Required. Stage Attempt ID
+  stageId: string, Required. Stage ID
+  summaryMetricsMask: string, Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Stage Attempt for a Stage of a Spark Application
+  "stageData": { # Data corresponding to a stage. # Output only. Data corresponding to a stage.
+    "accumulatorUpdates": [
+      {
+        "accumullableInfoId": "A String",
+        "name": "A String",
+        "update": "A String",
+        "value": "A String",
+      },
+    ],
+    "completionTime": "A String",
+    "description": "A String",
+    "details": "A String",
+    "executorMetricsDistributions": {
+      "diskBytesSpilled": [
+        3.14,
+      ],
+      "failedTasks": [
+        3.14,
+      ],
+      "inputBytes": [
+        3.14,
+      ],
+      "inputRecords": [
+        3.14,
+      ],
+      "killedTasks": [
+        3.14,
+      ],
+      "memoryBytesSpilled": [
+        3.14,
+      ],
+      "outputBytes": [
+        3.14,
+      ],
+      "outputRecords": [
+        3.14,
+      ],
+      "peakMemoryMetrics": {
+        "executorMetrics": [
+          {
+            "metrics": {
+              "a_key": "A String",
+            },
+          },
+        ],
+        "quantiles": [
+          3.14,
+        ],
+      },
+      "quantiles": [
+        3.14,
+      ],
+      "shuffleRead": [
+        3.14,
+      ],
+      "shuffleReadRecords": [
+        3.14,
+      ],
+      "shuffleWrite": [
+        3.14,
+      ],
+      "shuffleWriteRecords": [
+        3.14,
+      ],
+      "succeededTasks": [
+        3.14,
+      ],
+      "taskTimeMillis": [
+        3.14,
+      ],
+    },
+    "executorSummary": {
+      "a_key": { # Executor resources consumed by a stage.
+        "diskBytesSpilled": "A String",
+        "executorId": "A String",
+        "failedTasks": 42,
+        "inputBytes": "A String",
+        "inputRecords": "A String",
+        "isExcludedForStage": True or False,
+        "killedTasks": 42,
+        "memoryBytesSpilled": "A String",
+        "outputBytes": "A String",
+        "outputRecords": "A String",
+        "peakMemoryMetrics": {
+          "metrics": {
+            "a_key": "A String",
+          },
+        },
+        "shuffleRead": "A String",
+        "shuffleReadRecords": "A String",
+        "shuffleWrite": "A String",
+        "shuffleWriteRecords": "A String",
+        "stageAttemptId": 42,
+        "stageId": "A String",
+        "succeededTasks": 42,
+        "taskTimeMillis": "A String",
+      },
+    },
+    "failureReason": "A String",
+    "firstTaskLaunchedTime": "A String",
+    "isShufflePushEnabled": True or False,
+    "jobIds": [
+      "A String",
+    ],
+    "killedTasksSummary": {
+      "a_key": 42,
+    },
+    "locality": {
+      "a_key": "A String",
+    },
+    "name": "A String",
+    "numActiveTasks": 42,
+    "numCompleteTasks": 42,
+    "numCompletedIndices": 42,
+    "numFailedTasks": 42,
+    "numKilledTasks": 42,
+    "numTasks": 42,
+    "parentStageIds": [
+      "A String",
+    ],
+    "peakExecutorMetrics": {
+      "metrics": {
+        "a_key": "A String",
+      },
+    },
+    "rddIds": [
+      "A String",
+    ],
+    "resourceProfileId": 42,
+    "schedulingPool": "A String",
+    "shuffleMergersCount": 42,
+    "speculationSummary": { # Details of the speculation task when speculative execution is enabled.
+      "numActiveTasks": 42,
+      "numCompletedTasks": 42,
+      "numFailedTasks": 42,
+      "numKilledTasks": 42,
+      "numTasks": 42,
+      "stageAttemptId": 42,
+      "stageId": "A String",
+    },
+    "stageAttemptId": 42,
+    "stageId": "A String",
+    "stageMetrics": { # Stage Level Aggregated Metrics
+      "diskBytesSpilled": "A String",
+      "executorCpuTimeNanos": "A String",
+      "executorDeserializeCpuTimeNanos": "A String",
+      "executorDeserializeTimeMillis": "A String",
+      "executorRunTimeMillis": "A String",
+      "jvmGcTimeMillis": "A String",
+      "memoryBytesSpilled": "A String",
+      "peakExecutionMemoryBytes": "A String",
+      "resultSerializationTimeMillis": "A String",
+      "resultSize": "A String",
+      "stageInputMetrics": { # Metrics about the input read by the stage.
+        "bytesRead": "A String",
+        "recordsRead": "A String",
+      },
+      "stageOutputMetrics": { # Metrics about the output written by the stage.
+        "bytesWritten": "A String",
+        "recordsWritten": "A String",
+      },
+      "stageShuffleReadMetrics": { # Shuffle data read for the stage.
+        "bytesRead": "A String",
+        "fetchWaitTimeMillis": "A String",
+        "localBlocksFetched": "A String",
+        "localBytesRead": "A String",
+        "recordsRead": "A String",
+        "remoteBlocksFetched": "A String",
+        "remoteBytesRead": "A String",
+        "remoteBytesReadToDisk": "A String",
+        "remoteReqsDuration": "A String",
+        "stageShufflePushReadMetrics": {
+          "corruptMergedBlockChunks": "A String",
+          "localMergedBlocksFetched": "A String",
+          "localMergedBytesRead": "A String",
+          "localMergedChunksFetched": "A String",
+          "mergedFetchFallbackCount": "A String",
+          "remoteMergedBlocksFetched": "A String",
+          "remoteMergedBytesRead": "A String",
+          "remoteMergedChunksFetched": "A String",
+          "remoteMergedReqsDuration": "A String",
+        },
+      },
+      "stageShuffleWriteMetrics": { # Shuffle data written for the stage.
+        "bytesWritten": "A String",
+        "recordsWritten": "A String",
+        "writeTimeNanos": "A String",
+      },
+    },
+    "status": "A String",
+    "submissionTime": "A String",
+    "taskQuantileMetrics": { # Summary metrics fields. These are included in response only if present in summary_metrics_mask field in request
+      "diskBytesSpilled": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "durationMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "executorCpuTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "executorDeserializeCpuTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "executorDeserializeTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "executorRunTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "gettingResultTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "inputMetrics": {
+        "bytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "recordsRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+      },
+      "jvmGcTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "memoryBytesSpilled": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "outputMetrics": {
+        "bytesWritten": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "recordsWritten": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+      },
+      "peakExecutionMemoryBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "resultSerializationTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "resultSize": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "schedulerDelayMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "shuffleReadMetrics": {
+        "fetchWaitTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "localBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "readBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "readRecords": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "remoteBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "remoteBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "remoteBytesReadToDisk": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "remoteReqsDuration": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "shufflePushReadMetrics": {
+          "corruptMergedBlockChunks": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "localMergedBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "localMergedBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "localMergedChunksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "mergedFetchFallbackCount": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteMergedBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteMergedBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteMergedChunksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteMergedReqsDuration": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+        "totalBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+      },
+      "shuffleWriteMetrics": {
+        "writeBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "writeRecords": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "writeTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+      },
+    },
+    "tasks": {
+      "a_key": { # Data corresponding to tasks created by spark.
+        "accumulatorUpdates": [
+          {
+            "accumullableInfoId": "A String",
+            "name": "A String",
+            "update": "A String",
+            "value": "A String",
+          },
+        ],
+        "attempt": 42,
+        "durationMillis": "A String",
+        "errorMessage": "A String",
+        "executorId": "A String",
+        "executorLogs": {
+          "a_key": "A String",
+        },
+        "gettingResultTimeMillis": "A String",
+        "hasMetrics": True or False,
+        "host": "A String",
+        "index": 42,
+        "launchTime": "A String",
+        "partitionId": 42,
+        "resultFetchStart": "A String",
+        "schedulerDelayMillis": "A String",
+        "speculative": True or False,
+        "stageAttemptId": 42,
+        "stageId": "A String",
+        "status": "A String",
+        "taskId": "A String",
+        "taskLocality": "A String",
+        "taskMetrics": { # Executor Task Metrics
+          "diskBytesSpilled": "A String",
+          "executorCpuTimeNanos": "A String",
+          "executorDeserializeCpuTimeNanos": "A String",
+          "executorDeserializeTimeMillis": "A String",
+          "executorRunTimeMillis": "A String",
+          "inputMetrics": { # Metrics about the input data read by the task.
+            "bytesRead": "A String",
+            "recordsRead": "A String",
+          },
+          "jvmGcTimeMillis": "A String",
+          "memoryBytesSpilled": "A String",
+          "outputMetrics": { # Metrics about the data written by the task.
+            "bytesWritten": "A String",
+            "recordsWritten": "A String",
+          },
+          "peakExecutionMemoryBytes": "A String",
+          "resultSerializationTimeMillis": "A String",
+          "resultSize": "A String",
+          "shuffleReadMetrics": { # Shuffle data read by the task.
+            "fetchWaitTimeMillis": "A String",
+            "localBlocksFetched": "A String",
+            "localBytesRead": "A String",
+            "recordsRead": "A String",
+            "remoteBlocksFetched": "A String",
+            "remoteBytesRead": "A String",
+            "remoteBytesReadToDisk": "A String",
+            "remoteReqsDuration": "A String",
+            "shufflePushReadMetrics": {
+              "corruptMergedBlockChunks": "A String",
+              "localMergedBlocksFetched": "A String",
+              "localMergedBytesRead": "A String",
+              "localMergedChunksFetched": "A String",
+              "mergedFetchFallbackCount": "A String",
+              "remoteMergedBlocksFetched": "A String",
+              "remoteMergedBytesRead": "A String",
+              "remoteMergedChunksFetched": "A String",
+              "remoteMergedReqsDuration": "A String",
+            },
+          },
+          "shuffleWriteMetrics": { # Shuffle data written by task.
+            "bytesWritten": "A String",
+            "recordsWritten": "A String",
+            "writeTimeNanos": "A String",
+          },
+        },
+      },
+    },
+  },
+}
+
+ +
+ accessStageRddGraph(name, parent=None, stageId=None, x__xgafv=None) +
Obtain RDD operation graph for a Spark Application Stage. Limits the number of clusters returned as part of the graph to 10000.
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  parent: string, Required. Parent (Batch) resource reference.
+  stageId: string, Required. Stage ID
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # RDD operation graph for a Spark Application Stage limited to maximum 10000 clusters.
+  "rddOperationGraph": { # Graph representing RDD dependencies. Consists of edges and a root cluster. # RDD operation graph for a Spark Application Stage.
+    "edges": [
+      { # A directed edge representing dependency between two RDDs.
+        "fromId": 42,
+        "toId": 42,
+      },
+    ],
+    "incomingEdges": [
+      { # A directed edge representing dependency between two RDDs.
+        "fromId": 42,
+        "toId": 42,
+      },
+    ],
+    "outgoingEdges": [
+      { # A directed edge representing dependency between two RDDs.
+        "fromId": 42,
+        "toId": 42,
+      },
+    ],
+    "rootCluster": { # A grouping of nodes representing higher level constructs (stage, job etc.).
+      "childClusters": [
+        # Object with schema name: RddOperationCluster
+      ],
+      "childNodes": [
+        { # A node in the RDD operation graph. Corresponds to a single RDD.
+          "barrier": True or False,
+          "cached": True or False,
+          "callsite": "A String",
+          "name": "A String",
+          "nodeId": 42,
+          "outputDeterministicLevel": "A String",
+        },
+      ],
+      "name": "A String",
+      "rddClusterId": "A String",
+    },
+    "stageId": "A String",
+  },
+}
+
+ +
+ close() +
Close httplib2 connections.
+
+ +
+ search(parent, applicationStatus=None, maxEndTime=None, maxTime=None, minEndTime=None, minTime=None, pageSize=None, pageToken=None, x__xgafv=None) +
Obtain high level information and list of Spark Applications corresponding to a batch
+
+Args:
+  parent: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID" (required)
+  applicationStatus: string, Optional. Search only applications in the chosen state.
+    Allowed values
+      APPLICATION_STATUS_UNSPECIFIED - 
+      APPLICATION_STATUS_RUNNING - 
+      APPLICATION_STATUS_COMPLETED - 
+  maxEndTime: string, Optional. Latest end timestamp to list.
+  maxTime: string, Optional. Latest start timestamp to list.
+  minEndTime: string, Optional. Earliest end timestamp to list.
+  minTime: string, Optional. Earliest start timestamp to list.
+  pageSize: integer, Optional. Maximum number of applications to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
+  pageToken: string, Optional. A page token received from a previous SearchSparkApplications call. Provide this token to retrieve the subsequent page.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A list of summary of Spark Applications
+  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationsRequest.
+  "sparkApplications": [ # Output only. High level information corresponding to an application.
+    { # A summary of Spark Application
+      "application": { # High level information corresponding to an application. # Output only. High level information corresponding to an application.
+        "applicationContextIngestionStatus": "A String",
+        "applicationId": "A String",
+        "attempts": [
+          { # Specific attempt of an application.
+            "appSparkVersion": "A String",
+            "attemptId": "A String",
+            "completed": True or False,
+            "durationMillis": "A String",
+            "endTime": "A String",
+            "lastUpdated": "A String",
+            "sparkUser": "A String",
+            "startTime": "A String",
+          },
+        ],
+        "coresGranted": 42,
+        "coresPerExecutor": 42,
+        "maxCores": 42,
+        "memoryPerExecutorMb": 42,
+        "name": "A String",
+        "quantileDataStatus": "A String",
+      },
+      "name": "A String", # Identifier. Name of the spark application
+    },
+  ],
+}
+
+ +
+ searchExecutorStageSummary(name, pageSize=None, pageToken=None, parent=None, stageAttemptId=None, stageId=None, x__xgafv=None) +
Obtain executor summary with respect to a spark stage attempt.
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  pageSize: integer, Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
+  pageToken: string, Optional. A page token received from a previous AccessSparkApplicationExecutorsList call. Provide this token to retrieve the subsequent page.
+  parent: string, Required. Parent (Batch) resource reference.
+  stageAttemptId: integer, Required. Stage Attempt ID
+  stageId: string, Required. Stage ID
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # List of Executors associated with a Spark Application Stage.
+  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationExecutorsListRequest.
+  "sparkApplicationStageExecutors": [ # Details about executors used by the application stage.
+    { # Executor resources consumed by a stage.
+      "diskBytesSpilled": "A String",
+      "executorId": "A String",
+      "failedTasks": 42,
+      "inputBytes": "A String",
+      "inputRecords": "A String",
+      "isExcludedForStage": True or False,
+      "killedTasks": 42,
+      "memoryBytesSpilled": "A String",
+      "outputBytes": "A String",
+      "outputRecords": "A String",
+      "peakMemoryMetrics": {
+        "metrics": {
+          "a_key": "A String",
+        },
+      },
+      "shuffleRead": "A String",
+      "shuffleReadRecords": "A String",
+      "shuffleWrite": "A String",
+      "shuffleWriteRecords": "A String",
+      "stageAttemptId": 42,
+      "stageId": "A String",
+      "succeededTasks": 42,
+      "taskTimeMillis": "A String",
+    },
+  ],
+}
+
+ +
+ searchExecutorStageSummary_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ searchExecutors(name, executorStatus=None, pageSize=None, pageToken=None, parent=None, x__xgafv=None) +
Obtain data corresponding to executors for a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  executorStatus: string, Optional. Filter to select whether active, dead, or all executors should be returned.
+    Allowed values
+      EXECUTOR_STATUS_UNSPECIFIED - 
+      EXECUTOR_STATUS_ACTIVE - 
+      EXECUTOR_STATUS_DEAD - 
+  pageSize: integer, Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
+  pageToken: string, Optional. A page token received from a previous AccessSparkApplicationExecutorsList call. Provide this token to retrieve the subsequent page.
+  parent: string, Required. Parent (Batch) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # List of Executors associated with a Spark Application.
+  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationExecutorsListRequest.
+  "sparkApplicationExecutors": [ # Details about executors used by the application.
+    { # Details about executors used by the application.
+      "activeTasks": 42,
+      "addTime": "A String",
+      "attributes": {
+        "a_key": "A String",
+      },
+      "completedTasks": 42,
+      "diskUsed": "A String",
+      "excludedInStages": [
+        "A String",
+      ],
+      "executorId": "A String",
+      "executorLogs": {
+        "a_key": "A String",
+      },
+      "failedTasks": 42,
+      "hostPort": "A String",
+      "isActive": True or False,
+      "isExcluded": True or False,
+      "maxMemory": "A String",
+      "maxTasks": 42,
+      "memoryMetrics": {
+        "totalOffHeapStorageMemory": "A String",
+        "totalOnHeapStorageMemory": "A String",
+        "usedOffHeapStorageMemory": "A String",
+        "usedOnHeapStorageMemory": "A String",
+      },
+      "memoryUsed": "A String",
+      "peakMemoryMetrics": {
+        "metrics": {
+          "a_key": "A String",
+        },
+      },
+      "rddBlocks": 42,
+      "removeReason": "A String",
+      "removeTime": "A String",
+      "resourceProfileId": 42,
+      "resources": {
+        "a_key": {
+          "addresses": [
+            "A String",
+          ],
+          "name": "A String",
+        },
+      },
+      "totalCores": 42,
+      "totalDurationMillis": "A String",
+      "totalGcTimeMillis": "A String",
+      "totalInputBytes": "A String",
+      "totalShuffleRead": "A String",
+      "totalShuffleWrite": "A String",
+      "totalTasks": 42,
+    },
+  ],
+}
+
+ +
+ searchExecutors_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ searchJobs(name, jobStatus=None, pageSize=None, pageToken=None, parent=None, x__xgafv=None) +
Obtain list of spark jobs corresponding to a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  jobStatus: string, Optional. List only jobs in the specific state.
+    Allowed values
+      JOB_EXECUTION_STATUS_UNSPECIFIED - 
+      JOB_EXECUTION_STATUS_RUNNING - 
+      JOB_EXECUTION_STATUS_SUCCEEDED - 
+      JOB_EXECUTION_STATUS_FAILED - 
+      JOB_EXECUTION_STATUS_UNKNOWN - 
+  pageSize: integer, Optional. Maximum number of jobs to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
+  pageToken: string, Optional. A page token received from a previous SearchSparkApplicationJobs call. Provide this token to retrieve the subsequent page.
+  parent: string, Required. Parent (Batch) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A list of Jobs associated with a Spark Application.
+  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationJobsRequest.
+  "sparkApplicationJobs": [ # Output only. Data corresponding to a spark job.
+    { # Data corresponding to a spark job.
+      "completionTime": "A String",
+      "description": "A String",
+      "jobGroup": "A String",
+      "jobId": "A String",
+      "killTasksSummary": {
+        "a_key": 42,
+      },
+      "name": "A String",
+      "numActiveStages": 42,
+      "numActiveTasks": 42,
+      "numCompletedIndices": 42,
+      "numCompletedStages": 42,
+      "numCompletedTasks": 42,
+      "numFailedStages": 42,
+      "numFailedTasks": 42,
+      "numKilledTasks": 42,
+      "numSkippedStages": 42,
+      "numSkippedTasks": 42,
+      "numTasks": 42,
+      "skippedStages": [
+        42,
+      ],
+      "sqlExecutionId": "A String",
+      "stageIds": [
+        "A String",
+      ],
+      "status": "A String",
+      "submissionTime": "A String",
+    },
+  ],
+}
+
+ +
+ searchJobs_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ searchSqlQueries(name, details=None, pageSize=None, pageToken=None, parent=None, planDescription=None, x__xgafv=None) +
Obtain data corresponding to SQL Queries for a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  details: boolean, Optional. Shows or hides details of Spark plan nodes. Set to true to list details and false to hide them.
+  pageSize: integer, Optional. Maximum number of queries to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
+  pageToken: string, Optional. A page token received from a previous SearchSparkApplicationSqlQueries call. Provide this token to retrieve the subsequent page.
+  parent: string, Required. Parent (Batch) resource reference.
+  planDescription: boolean, Optional. Enables or disables the physical plan description on demand.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # List of all queries for a Spark Application.
+  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationSqlQueriesRequest.
+  "sparkApplicationSqlQueries": [ # Output only. SQL Execution Data
+    { # SQL Execution Data
+      "completionTime": "A String",
+      "description": "A String",
+      "details": "A String",
+      "errorMessage": "A String",
+      "executionId": "A String",
+      "jobs": {
+        "a_key": "A String",
+      },
+      "metricValues": {
+        "a_key": "A String",
+      },
+      "metricValuesIsNull": True or False,
+      "metrics": [
+        { # Metrics related to SQL execution.
+          "accumulatorId": "A String",
+          "metricType": "A String",
+          "name": "A String",
+        },
+      ],
+      "modifiedConfigs": {
+        "a_key": "A String",
+      },
+      "physicalPlanDescription": "A String",
+      "rootExecutionId": "A String",
+      "stages": [
+        "A String",
+      ],
+      "submissionTime": "A String",
+    },
+  ],
+}
+
+ +
+ searchSqlQueries_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ searchStageAttemptTasks(name, pageSize=None, pageToken=None, parent=None, sortRuntime=None, stageAttemptId=None, stageId=None, taskStatus=None, x__xgafv=None) +
Obtain data corresponding to tasks for a spark stage attempt for a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  pageSize: integer, Optional. Maximum number of tasks to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
+  pageToken: string, Optional. A page token received from a previous ListSparkApplicationStageAttemptTasks call. Provide this token to retrieve the subsequent page.
+  parent: string, Required. Parent (Batch) resource reference.
+  sortRuntime: boolean, Optional. Sort the tasks by runtime.
+  stageAttemptId: integer, Optional. Stage Attempt ID
+  stageId: string, Optional. Stage ID
+  taskStatus: string, Optional. List only tasks in the state.
+    Allowed values
+      TASK_STATUS_UNSPECIFIED - 
+      TASK_STATUS_RUNNING - 
+      TASK_STATUS_SUCCESS - 
+      TASK_STATUS_FAILED - 
+      TASK_STATUS_KILLED - 
+      TASK_STATUS_PENDING - 
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # List of tasks for a stage of a Spark Application
+  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListSparkApplicationStageAttemptTasksRequest.
+  "sparkApplicationStageAttemptTasks": [ # Output only. Data corresponding to tasks created by spark.
+    { # Data corresponding to tasks created by spark.
+      "accumulatorUpdates": [
+        {
+          "accumullableInfoId": "A String",
+          "name": "A String",
+          "update": "A String",
+          "value": "A String",
+        },
+      ],
+      "attempt": 42,
+      "durationMillis": "A String",
+      "errorMessage": "A String",
+      "executorId": "A String",
+      "executorLogs": {
+        "a_key": "A String",
+      },
+      "gettingResultTimeMillis": "A String",
+      "hasMetrics": True or False,
+      "host": "A String",
+      "index": 42,
+      "launchTime": "A String",
+      "partitionId": 42,
+      "resultFetchStart": "A String",
+      "schedulerDelayMillis": "A String",
+      "speculative": True or False,
+      "stageAttemptId": 42,
+      "stageId": "A String",
+      "status": "A String",
+      "taskId": "A String",
+      "taskLocality": "A String",
+      "taskMetrics": { # Executor Task Metrics
+        "diskBytesSpilled": "A String",
+        "executorCpuTimeNanos": "A String",
+        "executorDeserializeCpuTimeNanos": "A String",
+        "executorDeserializeTimeMillis": "A String",
+        "executorRunTimeMillis": "A String",
+        "inputMetrics": { # Metrics about the input data read by the task.
+          "bytesRead": "A String",
+          "recordsRead": "A String",
+        },
+        "jvmGcTimeMillis": "A String",
+        "memoryBytesSpilled": "A String",
+        "outputMetrics": { # Metrics about the data written by the task.
+          "bytesWritten": "A String",
+          "recordsWritten": "A String",
+        },
+        "peakExecutionMemoryBytes": "A String",
+        "resultSerializationTimeMillis": "A String",
+        "resultSize": "A String",
+        "shuffleReadMetrics": { # Shuffle data read by the task.
+          "fetchWaitTimeMillis": "A String",
+          "localBlocksFetched": "A String",
+          "localBytesRead": "A String",
+          "recordsRead": "A String",
+          "remoteBlocksFetched": "A String",
+          "remoteBytesRead": "A String",
+          "remoteBytesReadToDisk": "A String",
+          "remoteReqsDuration": "A String",
+          "shufflePushReadMetrics": {
+            "corruptMergedBlockChunks": "A String",
+            "localMergedBlocksFetched": "A String",
+            "localMergedBytesRead": "A String",
+            "localMergedChunksFetched": "A String",
+            "mergedFetchFallbackCount": "A String",
+            "remoteMergedBlocksFetched": "A String",
+            "remoteMergedBytesRead": "A String",
+            "remoteMergedChunksFetched": "A String",
+            "remoteMergedReqsDuration": "A String",
+          },
+        },
+        "shuffleWriteMetrics": { # Shuffle data written by task.
+          "bytesWritten": "A String",
+          "recordsWritten": "A String",
+          "writeTimeNanos": "A String",
+        },
+      },
+    },
+  ],
+}
+
+ +
+ searchStageAttemptTasks_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ searchStageAttempts(name, pageSize=None, pageToken=None, parent=None, stageId=None, summaryMetricsMask=None, x__xgafv=None) +
+Obtain data corresponding to spark stage attempts for a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  pageSize: integer, Optional. Maximum number of stage attempts (paging based on stage_attempt_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
+  pageToken: string, Optional. A page token received from a previous SearchSparkApplicationStageAttempts call. Provide this token to retrieve the subsequent page.
+  parent: string, Required. Parent (Batch) resource reference.
+  stageId: string, Required. Stage ID for which attempts are to be fetched.
+  summaryMetricsMask: string, Optional. The list of summary metrics fields to include. An empty list will default to skipping all summary metrics fields. For example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in the summary_metrics_mask field.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A list of Stage Attempts for a Stage of a Spark Application.
+  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListSparkApplicationStageAttemptsRequest.
+  "sparkApplicationStageAttempts": [ # Output only. Data corresponding to a stage attempts
+    { # Data corresponding to a stage.
+      "accumulatorUpdates": [
+        {
+          "accumullableInfoId": "A String",
+          "name": "A String",
+          "update": "A String",
+          "value": "A String",
+        },
+      ],
+      "completionTime": "A String",
+      "description": "A String",
+      "details": "A String",
+      "executorMetricsDistributions": {
+        "diskBytesSpilled": [
+          3.14,
+        ],
+        "failedTasks": [
+          3.14,
+        ],
+        "inputBytes": [
+          3.14,
+        ],
+        "inputRecords": [
+          3.14,
+        ],
+        "killedTasks": [
+          3.14,
+        ],
+        "memoryBytesSpilled": [
+          3.14,
+        ],
+        "outputBytes": [
+          3.14,
+        ],
+        "outputRecords": [
+          3.14,
+        ],
+        "peakMemoryMetrics": {
+          "executorMetrics": [
+            {
+              "metrics": {
+                "a_key": "A String",
+              },
+            },
+          ],
+          "quantiles": [
+            3.14,
+          ],
+        },
+        "quantiles": [
+          3.14,
+        ],
+        "shuffleRead": [
+          3.14,
+        ],
+        "shuffleReadRecords": [
+          3.14,
+        ],
+        "shuffleWrite": [
+          3.14,
+        ],
+        "shuffleWriteRecords": [
+          3.14,
+        ],
+        "succeededTasks": [
+          3.14,
+        ],
+        "taskTimeMillis": [
+          3.14,
+        ],
+      },
+      "executorSummary": {
+        "a_key": { # Executor resources consumed by a stage.
+          "diskBytesSpilled": "A String",
+          "executorId": "A String",
+          "failedTasks": 42,
+          "inputBytes": "A String",
+          "inputRecords": "A String",
+          "isExcludedForStage": True or False,
+          "killedTasks": 42,
+          "memoryBytesSpilled": "A String",
+          "outputBytes": "A String",
+          "outputRecords": "A String",
+          "peakMemoryMetrics": {
+            "metrics": {
+              "a_key": "A String",
+            },
+          },
+          "shuffleRead": "A String",
+          "shuffleReadRecords": "A String",
+          "shuffleWrite": "A String",
+          "shuffleWriteRecords": "A String",
+          "stageAttemptId": 42,
+          "stageId": "A String",
+          "succeededTasks": 42,
+          "taskTimeMillis": "A String",
+        },
+      },
+      "failureReason": "A String",
+      "firstTaskLaunchedTime": "A String",
+      "isShufflePushEnabled": True or False,
+      "jobIds": [
+        "A String",
+      ],
+      "killedTasksSummary": {
+        "a_key": 42,
+      },
+      "locality": {
+        "a_key": "A String",
+      },
+      "name": "A String",
+      "numActiveTasks": 42,
+      "numCompleteTasks": 42,
+      "numCompletedIndices": 42,
+      "numFailedTasks": 42,
+      "numKilledTasks": 42,
+      "numTasks": 42,
+      "parentStageIds": [
+        "A String",
+      ],
+      "peakExecutorMetrics": {
+        "metrics": {
+          "a_key": "A String",
+        },
+      },
+      "rddIds": [
+        "A String",
+      ],
+      "resourceProfileId": 42,
+      "schedulingPool": "A String",
+      "shuffleMergersCount": 42,
+      "speculationSummary": { # Details of the speculation task when speculative execution is enabled.
+        "numActiveTasks": 42,
+        "numCompletedTasks": 42,
+        "numFailedTasks": 42,
+        "numKilledTasks": 42,
+        "numTasks": 42,
+        "stageAttemptId": 42,
+        "stageId": "A String",
+      },
+      "stageAttemptId": 42,
+      "stageId": "A String",
+      "stageMetrics": { # Stage Level Aggregated Metrics
+        "diskBytesSpilled": "A String",
+        "executorCpuTimeNanos": "A String",
+        "executorDeserializeCpuTimeNanos": "A String",
+        "executorDeserializeTimeMillis": "A String",
+        "executorRunTimeMillis": "A String",
+        "jvmGcTimeMillis": "A String",
+        "memoryBytesSpilled": "A String",
+        "peakExecutionMemoryBytes": "A String",
+        "resultSerializationTimeMillis": "A String",
+        "resultSize": "A String",
+        "stageInputMetrics": { # Metrics about the input read by the stage.
+          "bytesRead": "A String",
+          "recordsRead": "A String",
+        },
+        "stageOutputMetrics": { # Metrics about the output written by the stage.
+          "bytesWritten": "A String",
+          "recordsWritten": "A String",
+        },
+        "stageShuffleReadMetrics": { # Shuffle data read for the stage.
+          "bytesRead": "A String",
+          "fetchWaitTimeMillis": "A String",
+          "localBlocksFetched": "A String",
+          "localBytesRead": "A String",
+          "recordsRead": "A String",
+          "remoteBlocksFetched": "A String",
+          "remoteBytesRead": "A String",
+          "remoteBytesReadToDisk": "A String",
+          "remoteReqsDuration": "A String",
+          "stageShufflePushReadMetrics": {
+            "corruptMergedBlockChunks": "A String",
+            "localMergedBlocksFetched": "A String",
+            "localMergedBytesRead": "A String",
+            "localMergedChunksFetched": "A String",
+            "mergedFetchFallbackCount": "A String",
+            "remoteMergedBlocksFetched": "A String",
+            "remoteMergedBytesRead": "A String",
+            "remoteMergedChunksFetched": "A String",
+            "remoteMergedReqsDuration": "A String",
+          },
+        },
+        "stageShuffleWriteMetrics": { # Shuffle data written for the stage.
+          "bytesWritten": "A String",
+          "recordsWritten": "A String",
+          "writeTimeNanos": "A String",
+        },
+      },
+      "status": "A String",
+      "submissionTime": "A String",
+      "taskQuantileMetrics": { # Summary metrics fields. These are included in response only if present in summary_metrics_mask field in request
+        "diskBytesSpilled": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "durationMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "executorCpuTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "executorDeserializeCpuTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "executorDeserializeTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "executorRunTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "gettingResultTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "inputMetrics": {
+          "bytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "recordsRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+        "jvmGcTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "memoryBytesSpilled": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "outputMetrics": {
+          "bytesWritten": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "recordsWritten": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+        "peakExecutionMemoryBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "resultSerializationTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "resultSize": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "schedulerDelayMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "shuffleReadMetrics": {
+          "fetchWaitTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "localBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "readBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "readRecords": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteBytesReadToDisk": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteReqsDuration": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "shufflePushReadMetrics": {
+            "corruptMergedBlockChunks": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "localMergedBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "localMergedBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "localMergedChunksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "mergedFetchFallbackCount": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteMergedBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteMergedBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteMergedChunksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteMergedReqsDuration": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+          },
+          "totalBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+        "shuffleWriteMetrics": {
+          "writeBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "writeRecords": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "writeTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+      },
+      "tasks": {
+        "a_key": { # Data corresponding to tasks created by spark.
+          "accumulatorUpdates": [
+            {
+              "accumullableInfoId": "A String",
+              "name": "A String",
+              "update": "A String",
+              "value": "A String",
+            },
+          ],
+          "attempt": 42,
+          "durationMillis": "A String",
+          "errorMessage": "A String",
+          "executorId": "A String",
+          "executorLogs": {
+            "a_key": "A String",
+          },
+          "gettingResultTimeMillis": "A String",
+          "hasMetrics": True or False,
+          "host": "A String",
+          "index": 42,
+          "launchTime": "A String",
+          "partitionId": 42,
+          "resultFetchStart": "A String",
+          "schedulerDelayMillis": "A String",
+          "speculative": True or False,
+          "stageAttemptId": 42,
+          "stageId": "A String",
+          "status": "A String",
+          "taskId": "A String",
+          "taskLocality": "A String",
+          "taskMetrics": { # Executor Task Metrics
+            "diskBytesSpilled": "A String",
+            "executorCpuTimeNanos": "A String",
+            "executorDeserializeCpuTimeNanos": "A String",
+            "executorDeserializeTimeMillis": "A String",
+            "executorRunTimeMillis": "A String",
+            "inputMetrics": { # Metrics about the input data read by the task.
+              "bytesRead": "A String",
+              "recordsRead": "A String",
+            },
+            "jvmGcTimeMillis": "A String",
+            "memoryBytesSpilled": "A String",
+            "outputMetrics": { # Metrics about the data written by the task.
+              "bytesWritten": "A String",
+              "recordsWritten": "A String",
+            },
+            "peakExecutionMemoryBytes": "A String",
+            "resultSerializationTimeMillis": "A String",
+            "resultSize": "A String",
+            "shuffleReadMetrics": { # Shuffle data read by the task.
+              "fetchWaitTimeMillis": "A String",
+              "localBlocksFetched": "A String",
+              "localBytesRead": "A String",
+              "recordsRead": "A String",
+              "remoteBlocksFetched": "A String",
+              "remoteBytesRead": "A String",
+              "remoteBytesReadToDisk": "A String",
+              "remoteReqsDuration": "A String",
+              "shufflePushReadMetrics": {
+                "corruptMergedBlockChunks": "A String",
+                "localMergedBlocksFetched": "A String",
+                "localMergedBytesRead": "A String",
+                "localMergedChunksFetched": "A String",
+                "mergedFetchFallbackCount": "A String",
+                "remoteMergedBlocksFetched": "A String",
+                "remoteMergedBytesRead": "A String",
+                "remoteMergedChunksFetched": "A String",
+                "remoteMergedReqsDuration": "A String",
+              },
+            },
+            "shuffleWriteMetrics": { # Shuffle data written by task.
+              "bytesWritten": "A String",
+              "recordsWritten": "A String",
+              "writeTimeNanos": "A String",
+            },
+          },
+        },
+      },
+    },
+  ],
+}
+
+ +
+ searchStageAttempts_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ searchStages(name, pageSize=None, pageToken=None, parent=None, stageStatus=None, summaryMetricsMask=None, x__xgafv=None) +
Obtain data corresponding to stages for a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  pageSize: integer, Optional. Maximum number of stages (paging based on stage_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
+  pageToken: string, Optional. A page token received from a previous FetchSparkApplicationStagesList call. Provide this token to retrieve the subsequent page.
+  parent: string, Required. Parent (Batch) resource reference.
+  stageStatus: string, Optional. List only stages in the given state.
+    Allowed values
+      STAGE_STATUS_UNSPECIFIED - 
+      STAGE_STATUS_ACTIVE - 
+      STAGE_STATUS_COMPLETE - 
+      STAGE_STATUS_FAILED - 
+      STAGE_STATUS_PENDING - 
+      STAGE_STATUS_SKIPPED - 
+  summaryMetricsMask: string, Optional. The list of summary metrics fields to include. An empty list will default to skipping all summary metrics fields. For example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in the summary_metrics_mask field.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A list of stages associated with a Spark Application.
+  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationStages.
+  "sparkApplicationStages": [ # Output only. Data corresponding to a stage.
+    { # Data corresponding to a stage.
+      "accumulatorUpdates": [
+        {
+          "accumullableInfoId": "A String",
+          "name": "A String",
+          "update": "A String",
+          "value": "A String",
+        },
+      ],
+      "completionTime": "A String",
+      "description": "A String",
+      "details": "A String",
+      "executorMetricsDistributions": {
+        "diskBytesSpilled": [
+          3.14,
+        ],
+        "failedTasks": [
+          3.14,
+        ],
+        "inputBytes": [
+          3.14,
+        ],
+        "inputRecords": [
+          3.14,
+        ],
+        "killedTasks": [
+          3.14,
+        ],
+        "memoryBytesSpilled": [
+          3.14,
+        ],
+        "outputBytes": [
+          3.14,
+        ],
+        "outputRecords": [
+          3.14,
+        ],
+        "peakMemoryMetrics": {
+          "executorMetrics": [
+            {
+              "metrics": {
+                "a_key": "A String",
+              },
+            },
+          ],
+          "quantiles": [
+            3.14,
+          ],
+        },
+        "quantiles": [
+          3.14,
+        ],
+        "shuffleRead": [
+          3.14,
+        ],
+        "shuffleReadRecords": [
+          3.14,
+        ],
+        "shuffleWrite": [
+          3.14,
+        ],
+        "shuffleWriteRecords": [
+          3.14,
+        ],
+        "succeededTasks": [
+          3.14,
+        ],
+        "taskTimeMillis": [
+          3.14,
+        ],
+      },
+      "executorSummary": {
+        "a_key": { # Executor resources consumed by a stage.
+          "diskBytesSpilled": "A String",
+          "executorId": "A String",
+          "failedTasks": 42,
+          "inputBytes": "A String",
+          "inputRecords": "A String",
+          "isExcludedForStage": True or False,
+          "killedTasks": 42,
+          "memoryBytesSpilled": "A String",
+          "outputBytes": "A String",
+          "outputRecords": "A String",
+          "peakMemoryMetrics": {
+            "metrics": {
+              "a_key": "A String",
+            },
+          },
+          "shuffleRead": "A String",
+          "shuffleReadRecords": "A String",
+          "shuffleWrite": "A String",
+          "shuffleWriteRecords": "A String",
+          "stageAttemptId": 42,
+          "stageId": "A String",
+          "succeededTasks": 42,
+          "taskTimeMillis": "A String",
+        },
+      },
+      "failureReason": "A String",
+      "firstTaskLaunchedTime": "A String",
+      "isShufflePushEnabled": True or False,
+      "jobIds": [
+        "A String",
+      ],
+      "killedTasksSummary": {
+        "a_key": 42,
+      },
+      "locality": {
+        "a_key": "A String",
+      },
+      "name": "A String",
+      "numActiveTasks": 42,
+      "numCompleteTasks": 42,
+      "numCompletedIndices": 42,
+      "numFailedTasks": 42,
+      "numKilledTasks": 42,
+      "numTasks": 42,
+      "parentStageIds": [
+        "A String",
+      ],
+      "peakExecutorMetrics": {
+        "metrics": {
+          "a_key": "A String",
+        },
+      },
+      "rddIds": [
+        "A String",
+      ],
+      "resourceProfileId": 42,
+      "schedulingPool": "A String",
+      "shuffleMergersCount": 42,
+      "speculationSummary": { # Details of the speculation task when speculative execution is enabled.
+        "numActiveTasks": 42,
+        "numCompletedTasks": 42,
+        "numFailedTasks": 42,
+        "numKilledTasks": 42,
+        "numTasks": 42,
+        "stageAttemptId": 42,
+        "stageId": "A String",
+      },
+      "stageAttemptId": 42,
+      "stageId": "A String",
+      "stageMetrics": { # Stage Level Aggregated Metrics
+        "diskBytesSpilled": "A String",
+        "executorCpuTimeNanos": "A String",
+        "executorDeserializeCpuTimeNanos": "A String",
+        "executorDeserializeTimeMillis": "A String",
+        "executorRunTimeMillis": "A String",
+        "jvmGcTimeMillis": "A String",
+        "memoryBytesSpilled": "A String",
+        "peakExecutionMemoryBytes": "A String",
+        "resultSerializationTimeMillis": "A String",
+        "resultSize": "A String",
+        "stageInputMetrics": { # Metrics about the input read by the stage.
+          "bytesRead": "A String",
+          "recordsRead": "A String",
+        },
+        "stageOutputMetrics": { # Metrics about the output written by the stage.
+          "bytesWritten": "A String",
+          "recordsWritten": "A String",
+        },
+        "stageShuffleReadMetrics": { # Shuffle data read for the stage.
+          "bytesRead": "A String",
+          "fetchWaitTimeMillis": "A String",
+          "localBlocksFetched": "A String",
+          "localBytesRead": "A String",
+          "recordsRead": "A String",
+          "remoteBlocksFetched": "A String",
+          "remoteBytesRead": "A String",
+          "remoteBytesReadToDisk": "A String",
+          "remoteReqsDuration": "A String",
+          "stageShufflePushReadMetrics": {
+            "corruptMergedBlockChunks": "A String",
+            "localMergedBlocksFetched": "A String",
+            "localMergedBytesRead": "A String",
+            "localMergedChunksFetched": "A String",
+            "mergedFetchFallbackCount": "A String",
+            "remoteMergedBlocksFetched": "A String",
+            "remoteMergedBytesRead": "A String",
+            "remoteMergedChunksFetched": "A String",
+            "remoteMergedReqsDuration": "A String",
+          },
+        },
+        "stageShuffleWriteMetrics": { # Shuffle data written for the stage.
+          "bytesWritten": "A String",
+          "recordsWritten": "A String",
+          "writeTimeNanos": "A String",
+        },
+      },
+      "status": "A String",
+      "submissionTime": "A String",
+      "taskQuantileMetrics": { # Summary metrics fields. These are included in response only if present in summary_metrics_mask field in request
+        "diskBytesSpilled": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "durationMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "executorCpuTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "executorDeserializeCpuTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "executorDeserializeTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "executorRunTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "gettingResultTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "inputMetrics": {
+          "bytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "recordsRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+        "jvmGcTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "memoryBytesSpilled": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "outputMetrics": {
+          "bytesWritten": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "recordsWritten": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+        "peakExecutionMemoryBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "resultSerializationTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "resultSize": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "schedulerDelayMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "shuffleReadMetrics": {
+          "fetchWaitTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "localBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "readBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "readRecords": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteBytesReadToDisk": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteReqsDuration": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "shufflePushReadMetrics": {
+            "corruptMergedBlockChunks": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "localMergedBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "localMergedBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "localMergedChunksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "mergedFetchFallbackCount": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteMergedBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteMergedBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteMergedChunksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteMergedReqsDuration": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+          },
+          "totalBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+        "shuffleWriteMetrics": {
+          "writeBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "writeRecords": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "writeTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+      },
+      "tasks": {
+        "a_key": { # Data corresponding to tasks created by spark.
+          "accumulatorUpdates": [
+            {
+              "accumullableInfoId": "A String",
+              "name": "A String",
+              "update": "A String",
+              "value": "A String",
+            },
+          ],
+          "attempt": 42,
+          "durationMillis": "A String",
+          "errorMessage": "A String",
+          "executorId": "A String",
+          "executorLogs": {
+            "a_key": "A String",
+          },
+          "gettingResultTimeMillis": "A String",
+          "hasMetrics": True or False,
+          "host": "A String",
+          "index": 42,
+          "launchTime": "A String",
+          "partitionId": 42,
+          "resultFetchStart": "A String",
+          "schedulerDelayMillis": "A String",
+          "speculative": True or False,
+          "stageAttemptId": 42,
+          "stageId": "A String",
+          "status": "A String",
+          "taskId": "A String",
+          "taskLocality": "A String",
+          "taskMetrics": { # Executor Task Metrics
+            "diskBytesSpilled": "A String",
+            "executorCpuTimeNanos": "A String",
+            "executorDeserializeCpuTimeNanos": "A String",
+            "executorDeserializeTimeMillis": "A String",
+            "executorRunTimeMillis": "A String",
+            "inputMetrics": { # Metrics about the input data read by the task.
+              "bytesRead": "A String",
+              "recordsRead": "A String",
+            },
+            "jvmGcTimeMillis": "A String",
+            "memoryBytesSpilled": "A String",
+            "outputMetrics": { # Metrics about the data written by the task.
+              "bytesWritten": "A String",
+              "recordsWritten": "A String",
+            },
+            "peakExecutionMemoryBytes": "A String",
+            "resultSerializationTimeMillis": "A String",
+            "resultSize": "A String",
+            "shuffleReadMetrics": { # Shuffle data read by the task.
+              "fetchWaitTimeMillis": "A String",
+              "localBlocksFetched": "A String",
+              "localBytesRead": "A String",
+              "recordsRead": "A String",
+              "remoteBlocksFetched": "A String",
+              "remoteBytesRead": "A String",
+              "remoteBytesReadToDisk": "A String",
+              "remoteReqsDuration": "A String",
+              "shufflePushReadMetrics": {
+                "corruptMergedBlockChunks": "A String",
+                "localMergedBlocksFetched": "A String",
+                "localMergedBytesRead": "A String",
+                "localMergedChunksFetched": "A String",
+                "mergedFetchFallbackCount": "A String",
+                "remoteMergedBlocksFetched": "A String",
+                "remoteMergedBytesRead": "A String",
+                "remoteMergedChunksFetched": "A String",
+                "remoteMergedReqsDuration": "A String",
+              },
+            },
+            "shuffleWriteMetrics": { # Shuffle data written by task.
+              "bytesWritten": "A String",
+              "recordsWritten": "A String",
+              "writeTimeNanos": "A String",
+            },
+          },
+        },
+      },
+    },
+  ],
+}
+
+ +
+ searchStages_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ search_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ summarizeExecutors(name, parent=None, x__xgafv=None) +
Obtain summary of Executor Summary for a Spark Application
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  parent: string, Required. Parent (Batch) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Consolidated summary of executors for a Spark Application.
+  "activeExecutorSummary": { # Consolidated summary about executors used by the application. # Consolidated summary for active executors.
+    "activeTasks": 42,
+    "completedTasks": 42,
+    "count": 42,
+    "diskUsed": "A String",
+    "failedTasks": 42,
+    "isExcluded": 42,
+    "maxMemory": "A String",
+    "memoryMetrics": {
+      "totalOffHeapStorageMemory": "A String",
+      "totalOnHeapStorageMemory": "A String",
+      "usedOffHeapStorageMemory": "A String",
+      "usedOnHeapStorageMemory": "A String",
+    },
+    "memoryUsed": "A String",
+    "rddBlocks": 42,
+    "totalCores": 42,
+    "totalDurationMillis": "A String",
+    "totalGcTimeMillis": "A String",
+    "totalInputBytes": "A String",
+    "totalShuffleRead": "A String",
+    "totalShuffleWrite": "A String",
+    "totalTasks": 42,
+  },
+  "applicationId": "A String", # Spark Application Id
+  "deadExecutorSummary": { # Consolidated summary about executors used by the application. # Consolidated summary for dead executors.
+    "activeTasks": 42,
+    "completedTasks": 42,
+    "count": 42,
+    "diskUsed": "A String",
+    "failedTasks": 42,
+    "isExcluded": 42,
+    "maxMemory": "A String",
+    "memoryMetrics": {
+      "totalOffHeapStorageMemory": "A String",
+      "totalOnHeapStorageMemory": "A String",
+      "usedOffHeapStorageMemory": "A String",
+      "usedOnHeapStorageMemory": "A String",
+    },
+    "memoryUsed": "A String",
+    "rddBlocks": 42,
+    "totalCores": 42,
+    "totalDurationMillis": "A String",
+    "totalGcTimeMillis": "A String",
+    "totalInputBytes": "A String",
+    "totalShuffleRead": "A String",
+    "totalShuffleWrite": "A String",
+    "totalTasks": 42,
+  },
+  "totalExecutorSummary": { # Consolidated summary about executors used by the application. # Overall consolidated summary for all executors.
+    "activeTasks": 42,
+    "completedTasks": 42,
+    "count": 42,
+    "diskUsed": "A String",
+    "failedTasks": 42,
+    "isExcluded": 42,
+    "maxMemory": "A String",
+    "memoryMetrics": {
+      "totalOffHeapStorageMemory": "A String",
+      "totalOnHeapStorageMemory": "A String",
+      "usedOffHeapStorageMemory": "A String",
+      "usedOnHeapStorageMemory": "A String",
+    },
+    "memoryUsed": "A String",
+    "rddBlocks": 42,
+    "totalCores": 42,
+    "totalDurationMillis": "A String",
+    "totalGcTimeMillis": "A String",
+    "totalInputBytes": "A String",
+    "totalShuffleRead": "A String",
+    "totalShuffleWrite": "A String",
+    "totalTasks": 42,
+  },
+}
+
+ +
+ summarizeJobs(name, parent=None, x__xgafv=None) +
Obtain summary of Jobs for a Spark Application
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  parent: string, Required. Parent (Batch) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Summary of a Spark Application jobs.
+  "jobsSummary": { # Data related to Jobs page summary # Summary of a Spark Application Jobs
+    "activeJobs": 42, # Number of active jobs
+    "applicationId": "A String", # Spark Application Id
+    "attempts": [ # Attempts info
+      { # Specific attempt of an application.
+        "appSparkVersion": "A String",
+        "attemptId": "A String",
+        "completed": True or False,
+        "durationMillis": "A String",
+        "endTime": "A String",
+        "lastUpdated": "A String",
+        "sparkUser": "A String",
+        "startTime": "A String",
+      },
+    ],
+    "completedJobs": 42, # Number of completed jobs
+    "failedJobs": 42, # Number of failed jobs
+    "schedulingMode": "A String", # Spark Scheduling mode
+  },
+}
+
+ +
+ summarizeStageAttemptTasks(name, parent=None, stageAttemptId=None, stageId=None, x__xgafv=None) +
Obtain summary of Tasks for a Spark Application Stage Attempt
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  parent: string, Required. Parent (Batch) resource reference.
+  stageAttemptId: integer, Required. Stage Attempt ID
+  stageId: string, Required. Stage ID
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Summary of tasks for a Spark Application stage attempt.
+  "stageAttemptTasksSummary": { # Data related to tasks summary for a Spark Stage Attempt # Summary of tasks for a Spark Application Stage Attempt
+    "applicationId": "A String",
+    "numFailedTasks": 42,
+    "numKilledTasks": 42,
+    "numPendingTasks": 42,
+    "numRunningTasks": 42,
+    "numSuccessTasks": 42,
+    "numTasks": 42,
+    "stageAttemptId": 42,
+    "stageId": "A String",
+  },
+}
+
+ +
+ summarizeStages(name, parent=None, x__xgafv=None) +
Obtain summary of Stages for a Spark Application
+
+Args:
+  name: string, Required. The fully qualified name of the batch to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  parent: string, Required. Parent (Batch) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Summary of a Spark Application stages.
+  "stagesSummary": { # Data related to Stages page summary # Summary of a Spark Application Stages
+    "applicationId": "A String",
+    "numActiveStages": 42,
+    "numCompletedStages": 42,
+    "numFailedStages": 42,
+    "numPendingStages": 42,
+    "numSkippedStages": 42,
+  },
+}
+
+ +
+ write(name, body=None, x__xgafv=None) +
Write wrapper objects from dataplane to spanner
+
+Args:
+  name: string, Required. The fully qualified name of the spark application to write data about in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID" (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Write Spark Application data to internal storage systems
+  "parent": "A String", # Required. Parent (Batch) resource reference.
+  "sparkWrapperObjects": [
+    { # Outer message that contains the data obtained from spark listener, packaged with information that is required to process it.
+      "appSummary": {
+        "numCompletedJobs": 42,
+        "numCompletedStages": 42,
+      },
+      "applicationEnvironmentInfo": { # Details about the Environment that the application is running in.
+        "classpathEntries": {
+          "a_key": "A String",
+        },
+        "hadoopProperties": {
+          "a_key": "A String",
+        },
+        "metricsProperties": {
+          "a_key": "A String",
+        },
+        "resourceProfiles": [
+          { # Resource profile that contains information about all the resources required by executors and tasks.
+            "executorResources": {
+              "a_key": { # Resources used per executor used by the application.
+                "amount": "A String",
+                "discoveryScript": "A String",
+                "resourceName": "A String",
+                "vendor": "A String",
+              },
+            },
+            "resourceProfileId": 42,
+            "taskResources": {
+              "a_key": { # Resources used per task created by the application.
+                "amount": 3.14,
+                "resourceName": "A String",
+              },
+            },
+          },
+        ],
+        "runtime": {
+          "javaHome": "A String",
+          "javaVersion": "A String",
+          "scalaVersion": "A String",
+        },
+        "sparkProperties": {
+          "a_key": "A String",
+        },
+        "systemProperties": {
+          "a_key": "A String",
+        },
+      },
+      "applicationId": "A String", # Application Id created by Spark.
+      "applicationInfo": { # High level information corresponding to an application.
+        "applicationContextIngestionStatus": "A String",
+        "applicationId": "A String",
+        "attempts": [
+          { # Specific attempt of an application.
+            "appSparkVersion": "A String",
+            "attemptId": "A String",
+            "completed": True or False,
+            "durationMillis": "A String",
+            "endTime": "A String",
+            "lastUpdated": "A String",
+            "sparkUser": "A String",
+            "startTime": "A String",
+          },
+        ],
+        "coresGranted": 42,
+        "coresPerExecutor": 42,
+        "maxCores": 42,
+        "memoryPerExecutorMb": 42,
+        "name": "A String",
+        "quantileDataStatus": "A String",
+      },
+      "eventTimestamp": "A String", # VM Timestamp associated with the data object.
+      "executorStageSummary": { # Executor resources consumed by a stage.
+        "diskBytesSpilled": "A String",
+        "executorId": "A String",
+        "failedTasks": 42,
+        "inputBytes": "A String",
+        "inputRecords": "A String",
+        "isExcludedForStage": True or False,
+        "killedTasks": 42,
+        "memoryBytesSpilled": "A String",
+        "outputBytes": "A String",
+        "outputRecords": "A String",
+        "peakMemoryMetrics": {
+          "metrics": {
+            "a_key": "A String",
+          },
+        },
+        "shuffleRead": "A String",
+        "shuffleReadRecords": "A String",
+        "shuffleWrite": "A String",
+        "shuffleWriteRecords": "A String",
+        "stageAttemptId": 42,
+        "stageId": "A String",
+        "succeededTasks": 42,
+        "taskTimeMillis": "A String",
+      },
+      "executorSummary": { # Details about executors used by the application.
+        "activeTasks": 42,
+        "addTime": "A String",
+        "attributes": {
+          "a_key": "A String",
+        },
+        "completedTasks": 42,
+        "diskUsed": "A String",
+        "excludedInStages": [
+          "A String",
+        ],
+        "executorId": "A String",
+        "executorLogs": {
+          "a_key": "A String",
+        },
+        "failedTasks": 42,
+        "hostPort": "A String",
+        "isActive": True or False,
+        "isExcluded": True or False,
+        "maxMemory": "A String",
+        "maxTasks": 42,
+        "memoryMetrics": {
+          "totalOffHeapStorageMemory": "A String",
+          "totalOnHeapStorageMemory": "A String",
+          "usedOffHeapStorageMemory": "A String",
+          "usedOnHeapStorageMemory": "A String",
+        },
+        "memoryUsed": "A String",
+        "peakMemoryMetrics": {
+          "metrics": {
+            "a_key": "A String",
+          },
+        },
+        "rddBlocks": 42,
+        "removeReason": "A String",
+        "removeTime": "A String",
+        "resourceProfileId": 42,
+        "resources": {
+          "a_key": {
+            "addresses": [
+              "A String",
+            ],
+            "name": "A String",
+          },
+        },
+        "totalCores": 42,
+        "totalDurationMillis": "A String",
+        "totalGcTimeMillis": "A String",
+        "totalInputBytes": "A String",
+        "totalShuffleRead": "A String",
+        "totalShuffleWrite": "A String",
+        "totalTasks": 42,
+      },
+      "jobData": { # Data corresponding to a spark job.
+        "completionTime": "A String",
+        "description": "A String",
+        "jobGroup": "A String",
+        "jobId": "A String",
+        "killTasksSummary": {
+          "a_key": 42,
+        },
+        "name": "A String",
+        "numActiveStages": 42,
+        "numActiveTasks": 42,
+        "numCompletedIndices": 42,
+        "numCompletedStages": 42,
+        "numCompletedTasks": 42,
+        "numFailedStages": 42,
+        "numFailedTasks": 42,
+        "numKilledTasks": 42,
+        "numSkippedStages": 42,
+        "numSkippedTasks": 42,
+        "numTasks": 42,
+        "skippedStages": [
+          42,
+        ],
+        "sqlExecutionId": "A String",
+        "stageIds": [
+          "A String",
+        ],
+        "status": "A String",
+        "submissionTime": "A String",
+      },
+      "poolData": { # Pool Data
+        "name": "A String",
+        "stageIds": [
+          "A String",
+        ],
+      },
+      "processSummary": { # Process Summary
+        "addTime": "A String",
+        "hostPort": "A String",
+        "isActive": True or False,
+        "processId": "A String",
+        "processLogs": {
+          "a_key": "A String",
+        },
+        "removeTime": "A String",
+        "totalCores": 42,
+      },
+      "rddOperationGraph": { # Graph representing RDD dependencies. Consists of edges and a root cluster.
+        "edges": [
+          { # A directed edge representing dependency between two RDDs.
+            "fromId": 42,
+            "toId": 42,
+          },
+        ],
+        "incomingEdges": [
+          { # A directed edge representing dependency between two RDDs.
+            "fromId": 42,
+            "toId": 42,
+          },
+        ],
+        "outgoingEdges": [
+          { # A directed edge representing dependency between two RDDs.
+            "fromId": 42,
+            "toId": 42,
+          },
+        ],
+        "rootCluster": { # A grouping of nodes representing higher level constructs (stage, job etc.).
+          "childClusters": [
+            # Object with schema name: RddOperationCluster
+          ],
+          "childNodes": [
+            { # A node in the RDD operation graph. Corresponds to a single RDD.
+              "barrier": True or False,
+              "cached": True or False,
+              "callsite": "A String",
+              "name": "A String",
+              "nodeId": 42,
+              "outputDeterministicLevel": "A String",
+            },
+          ],
+          "name": "A String",
+          "rddClusterId": "A String",
+        },
+        "stageId": "A String",
+      },
+      "rddStorageInfo": { # Overall data about RDD storage.
+        "dataDistribution": [
+          { # Details about RDD usage.
+            "address": "A String",
+            "diskUsed": "A String",
+            "memoryRemaining": "A String",
+            "memoryUsed": "A String",
+            "offHeapMemoryRemaining": "A String",
+            "offHeapMemoryUsed": "A String",
+            "onHeapMemoryRemaining": "A String",
+            "onHeapMemoryUsed": "A String",
+          },
+        ],
+        "diskUsed": "A String",
+        "memoryUsed": "A String",
+        "name": "A String",
+        "numCachedPartitions": 42,
+        "numPartitions": 42,
+        "partitions": [
+          { # Information about RDD partitions.
+            "blockName": "A String",
+            "diskUsed": "A String",
+            "executors": [
+              "A String",
+            ],
+            "memoryUsed": "A String",
+            "storageLevel": "A String",
+          },
+        ],
+        "rddStorageId": 42,
+        "storageLevel": "A String",
+      },
+      "resourceProfileInfo": { # Resource profile that contains information about all the resources required by executors and tasks.
+        "executorResources": {
+          "a_key": { # Resources used per executor used by the application.
+            "amount": "A String",
+            "discoveryScript": "A String",
+            "resourceName": "A String",
+            "vendor": "A String",
+          },
+        },
+        "resourceProfileId": 42,
+        "taskResources": {
+          "a_key": { # Resources used per task created by the application.
+            "amount": 3.14,
+            "resourceName": "A String",
+          },
+        },
+      },
+      "sparkPlanGraph": { # A graph used for storing information of an executionPlan of DataFrame.
+        "edges": [
+          { # Represents a directed edge in the spark plan tree from child to parent.
+            "fromId": "A String",
+            "toId": "A String",
+          },
+        ],
+        "executionId": "A String",
+        "nodes": [
+          { # Wrapper user to represent either a node or a cluster.
+            "cluster": { # Represents a tree of spark plan.
+              "desc": "A String",
+              "metrics": [
+                { # Metrics related to SQL execution.
+                  "accumulatorId": "A String",
+                  "metricType": "A String",
+                  "name": "A String",
+                },
+              ],
+              "name": "A String",
+              "nodes": [
+                # Object with schema name: SparkPlanGraphNodeWrapper
+              ],
+              "sparkPlanGraphClusterId": "A String",
+            },
+            "node": { # Represents a node in the spark plan tree.
+              "desc": "A String",
+              "metrics": [
+                { # Metrics related to SQL execution.
+                  "accumulatorId": "A String",
+                  "metricType": "A String",
+                  "name": "A String",
+                },
+              ],
+              "name": "A String",
+              "sparkPlanGraphNodeId": "A String",
+            },
+          },
+        ],
+      },
+      "speculationStageSummary": { # Details of the speculation task when speculative execution is enabled.
+        "numActiveTasks": 42,
+        "numCompletedTasks": 42,
+        "numFailedTasks": 42,
+        "numKilledTasks": 42,
+        "numTasks": 42,
+        "stageAttemptId": 42,
+        "stageId": "A String",
+      },
+      "sqlExecutionUiData": { # SQL Execution Data
+        "completionTime": "A String",
+        "description": "A String",
+        "details": "A String",
+        "errorMessage": "A String",
+        "executionId": "A String",
+        "jobs": {
+          "a_key": "A String",
+        },
+        "metricValues": {
+          "a_key": "A String",
+        },
+        "metricValuesIsNull": True or False,
+        "metrics": [
+          { # Metrics related to SQL execution.
+            "accumulatorId": "A String",
+            "metricType": "A String",
+            "name": "A String",
+          },
+        ],
+        "modifiedConfigs": {
+          "a_key": "A String",
+        },
+        "physicalPlanDescription": "A String",
+        "rootExecutionId": "A String",
+        "stages": [
+          "A String",
+        ],
+        "submissionTime": "A String",
+      },
+      "stageData": { # Data corresponding to a stage.
+        "accumulatorUpdates": [
+          {
+            "accumullableInfoId": "A String",
+            "name": "A String",
+            "update": "A String",
+            "value": "A String",
+          },
+        ],
+        "completionTime": "A String",
+        "description": "A String",
+        "details": "A String",
+        "executorMetricsDistributions": {
+          "diskBytesSpilled": [
+            3.14,
+          ],
+          "failedTasks": [
+            3.14,
+          ],
+          "inputBytes": [
+            3.14,
+          ],
+          "inputRecords": [
+            3.14,
+          ],
+          "killedTasks": [
+            3.14,
+          ],
+          "memoryBytesSpilled": [
+            3.14,
+          ],
+          "outputBytes": [
+            3.14,
+          ],
+          "outputRecords": [
+            3.14,
+          ],
+          "peakMemoryMetrics": {
+            "executorMetrics": [
+              {
+                "metrics": {
+                  "a_key": "A String",
+                },
+              },
+            ],
+            "quantiles": [
+              3.14,
+            ],
+          },
+          "quantiles": [
+            3.14,
+          ],
+          "shuffleRead": [
+            3.14,
+          ],
+          "shuffleReadRecords": [
+            3.14,
+          ],
+          "shuffleWrite": [
+            3.14,
+          ],
+          "shuffleWriteRecords": [
+            3.14,
+          ],
+          "succeededTasks": [
+            3.14,
+          ],
+          "taskTimeMillis": [
+            3.14,
+          ],
+        },
+        "executorSummary": {
+          "a_key": { # Executor resources consumed by a stage.
+            "diskBytesSpilled": "A String",
+            "executorId": "A String",
+            "failedTasks": 42,
+            "inputBytes": "A String",
+            "inputRecords": "A String",
+            "isExcludedForStage": True or False,
+            "killedTasks": 42,
+            "memoryBytesSpilled": "A String",
+            "outputBytes": "A String",
+            "outputRecords": "A String",
+            "peakMemoryMetrics": {
+              "metrics": {
+                "a_key": "A String",
+              },
+            },
+            "shuffleRead": "A String",
+            "shuffleReadRecords": "A String",
+            "shuffleWrite": "A String",
+            "shuffleWriteRecords": "A String",
+            "stageAttemptId": 42,
+            "stageId": "A String",
+            "succeededTasks": 42,
+            "taskTimeMillis": "A String",
+          },
+        },
+        "failureReason": "A String",
+        "firstTaskLaunchedTime": "A String",
+        "isShufflePushEnabled": True or False,
+        "jobIds": [
+          "A String",
+        ],
+        "killedTasksSummary": {
+          "a_key": 42,
+        },
+        "locality": {
+          "a_key": "A String",
+        },
+        "name": "A String",
+        "numActiveTasks": 42,
+        "numCompleteTasks": 42,
+        "numCompletedIndices": 42,
+        "numFailedTasks": 42,
+        "numKilledTasks": 42,
+        "numTasks": 42,
+        "parentStageIds": [
+          "A String",
+        ],
+        "peakExecutorMetrics": {
+          "metrics": {
+            "a_key": "A String",
+          },
+        },
+        "rddIds": [
+          "A String",
+        ],
+        "resourceProfileId": 42,
+        "schedulingPool": "A String",
+        "shuffleMergersCount": 42,
+        "speculationSummary": { # Details of the speculation task when speculative execution is enabled.
+          "numActiveTasks": 42,
+          "numCompletedTasks": 42,
+          "numFailedTasks": 42,
+          "numKilledTasks": 42,
+          "numTasks": 42,
+          "stageAttemptId": 42,
+          "stageId": "A String",
+        },
+        "stageAttemptId": 42,
+        "stageId": "A String",
+        "stageMetrics": { # Stage Level Aggregated Metrics
+          "diskBytesSpilled": "A String",
+          "executorCpuTimeNanos": "A String",
+          "executorDeserializeCpuTimeNanos": "A String",
+          "executorDeserializeTimeMillis": "A String",
+          "executorRunTimeMillis": "A String",
+          "jvmGcTimeMillis": "A String",
+          "memoryBytesSpilled": "A String",
+          "peakExecutionMemoryBytes": "A String",
+          "resultSerializationTimeMillis": "A String",
+          "resultSize": "A String",
+          "stageInputMetrics": { # Metrics about the input read by the stage.
+            "bytesRead": "A String",
+            "recordsRead": "A String",
+          },
+          "stageOutputMetrics": { # Metrics about the output written by the stage.
+            "bytesWritten": "A String",
+            "recordsWritten": "A String",
+          },
+          "stageShuffleReadMetrics": { # Shuffle data read for the stage.
+            "bytesRead": "A String",
+            "fetchWaitTimeMillis": "A String",
+            "localBlocksFetched": "A String",
+            "localBytesRead": "A String",
+            "recordsRead": "A String",
+            "remoteBlocksFetched": "A String",
+            "remoteBytesRead": "A String",
+            "remoteBytesReadToDisk": "A String",
+            "remoteReqsDuration": "A String",
+            "stageShufflePushReadMetrics": {
+              "corruptMergedBlockChunks": "A String",
+              "localMergedBlocksFetched": "A String",
+              "localMergedBytesRead": "A String",
+              "localMergedChunksFetched": "A String",
+              "mergedFetchFallbackCount": "A String",
+              "remoteMergedBlocksFetched": "A String",
+              "remoteMergedBytesRead": "A String",
+              "remoteMergedChunksFetched": "A String",
+              "remoteMergedReqsDuration": "A String",
+            },
+          },
+          "stageShuffleWriteMetrics": { # Shuffle data written for the stage.
+            "bytesWritten": "A String",
+            "recordsWritten": "A String",
+            "writeTimeNanos": "A String",
+          },
+        },
+        "status": "A String",
+        "submissionTime": "A String",
+        "taskQuantileMetrics": { # Summary metrics fields. These are included in response only if present in summary_metrics_mask field in request
+          "diskBytesSpilled": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "durationMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "executorCpuTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "executorDeserializeCpuTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "executorDeserializeTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "executorRunTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "gettingResultTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "inputMetrics": {
+            "bytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "recordsRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+          },
+          "jvmGcTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "memoryBytesSpilled": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "outputMetrics": {
+            "bytesWritten": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "recordsWritten": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+          },
+          "peakExecutionMemoryBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "resultSerializationTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "resultSize": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "schedulerDelayMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "shuffleReadMetrics": {
+            "fetchWaitTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "localBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "readBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "readRecords": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteBytesReadToDisk": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteReqsDuration": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "shufflePushReadMetrics": {
+              "corruptMergedBlockChunks": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+              "localMergedBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+              "localMergedBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+              "localMergedChunksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+              "mergedFetchFallbackCount": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+              "remoteMergedBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+              "remoteMergedBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+              "remoteMergedChunksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+              "remoteMergedReqsDuration": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+            },
+            "totalBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+          },
+          "shuffleWriteMetrics": {
+            "writeBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "writeRecords": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "writeTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+          },
+        },
+        "tasks": {
+          "a_key": { # Data corresponding to tasks created by spark.
+            "accumulatorUpdates": [
+              {
+                "accumullableInfoId": "A String",
+                "name": "A String",
+                "update": "A String",
+                "value": "A String",
+              },
+            ],
+            "attempt": 42,
+            "durationMillis": "A String",
+            "errorMessage": "A String",
+            "executorId": "A String",
+            "executorLogs": {
+              "a_key": "A String",
+            },
+            "gettingResultTimeMillis": "A String",
+            "hasMetrics": True or False,
+            "host": "A String",
+            "index": 42,
+            "launchTime": "A String",
+            "partitionId": 42,
+            "resultFetchStart": "A String",
+            "schedulerDelayMillis": "A String",
+            "speculative": True or False,
+            "stageAttemptId": 42,
+            "stageId": "A String",
+            "status": "A String",
+            "taskId": "A String",
+            "taskLocality": "A String",
+            "taskMetrics": { # Executor Task Metrics
+              "diskBytesSpilled": "A String",
+              "executorCpuTimeNanos": "A String",
+              "executorDeserializeCpuTimeNanos": "A String",
+              "executorDeserializeTimeMillis": "A String",
+              "executorRunTimeMillis": "A String",
+              "inputMetrics": { # Metrics about the input data read by the task.
+                "bytesRead": "A String",
+                "recordsRead": "A String",
+              },
+              "jvmGcTimeMillis": "A String",
+              "memoryBytesSpilled": "A String",
+              "outputMetrics": { # Metrics about the data written by the task.
+                "bytesWritten": "A String",
+                "recordsWritten": "A String",
+              },
+              "peakExecutionMemoryBytes": "A String",
+              "resultSerializationTimeMillis": "A String",
+              "resultSize": "A String",
+              "shuffleReadMetrics": { # Shuffle data read by the task.
+                "fetchWaitTimeMillis": "A String",
+                "localBlocksFetched": "A String",
+                "localBytesRead": "A String",
+                "recordsRead": "A String",
+                "remoteBlocksFetched": "A String",
+                "remoteBytesRead": "A String",
+                "remoteBytesReadToDisk": "A String",
+                "remoteReqsDuration": "A String",
+                "shufflePushReadMetrics": {
+                  "corruptMergedBlockChunks": "A String",
+                  "localMergedBlocksFetched": "A String",
+                  "localMergedBytesRead": "A String",
+                  "localMergedChunksFetched": "A String",
+                  "mergedFetchFallbackCount": "A String",
+                  "remoteMergedBlocksFetched": "A String",
+                  "remoteMergedBytesRead": "A String",
+                  "remoteMergedChunksFetched": "A String",
+                  "remoteMergedReqsDuration": "A String",
+                },
+              },
+              "shuffleWriteMetrics": { # Shuffle data written by task.
+                "bytesWritten": "A String",
+                "recordsWritten": "A String",
+                "writeTimeNanos": "A String",
+              },
+            },
+          },
+        },
+      },
+      "streamBlockData": { # Stream Block Data.
+        "deserialized": True or False,
+        "diskSize": "A String",
+        "executorId": "A String",
+        "hostPort": "A String",
+        "memSize": "A String",
+        "name": "A String",
+        "storageLevel": "A String",
+        "useDisk": True or False,
+        "useMemory": True or False,
+      },
+      "streamingQueryData": { # Streaming
+        "endTimestamp": "A String",
+        "exception": "A String",
+        "isActive": True or False,
+        "name": "A String",
+        "runId": "A String",
+        "startTimestamp": "A String",
+        "streamingQueryId": "A String",
+      },
+      "streamingQueryProgress": {
+        "batchDuration": "A String",
+        "batchId": "A String",
+        "durationMillis": {
+          "a_key": "A String",
+        },
+        "eventTime": {
+          "a_key": "A String",
+        },
+        "name": "A String",
+        "observedMetrics": {
+          "a_key": "A String",
+        },
+        "runId": "A String",
+        "sink": {
+          "description": "A String",
+          "metrics": {
+            "a_key": "A String",
+          },
+          "numOutputRows": "A String",
+        },
+        "sources": [
+          {
+            "description": "A String",
+            "endOffset": "A String",
+            "inputRowsPerSecond": 3.14,
+            "latestOffset": "A String",
+            "metrics": {
+              "a_key": "A String",
+            },
+            "numInputRows": "A String",
+            "processedRowsPerSecond": 3.14,
+            "startOffset": "A String",
+          },
+        ],
+        "stateOperators": [
+          {
+            "allRemovalsTimeMs": "A String",
+            "allUpdatesTimeMs": "A String",
+            "commitTimeMs": "A String",
+            "customMetrics": {
+              "a_key": "A String",
+            },
+            "memoryUsedBytes": "A String",
+            "numRowsDroppedByWatermark": "A String",
+            "numRowsRemoved": "A String",
+            "numRowsTotal": "A String",
+            "numRowsUpdated": "A String",
+            "numShufflePartitions": "A String",
+            "numStateStoreInstances": "A String",
+            "operatorName": "A String",
+          },
+        ],
+        "streamingQueryProgressId": "A String",
+        "timestamp": "A String",
+      },
+      "taskData": { # Data corresponding to tasks created by spark.
+        "accumulatorUpdates": [
+          {
+            "accumullableInfoId": "A String",
+            "name": "A String",
+            "update": "A String",
+            "value": "A String",
+          },
+        ],
+        "attempt": 42,
+        "durationMillis": "A String",
+        "errorMessage": "A String",
+        "executorId": "A String",
+        "executorLogs": {
+          "a_key": "A String",
+        },
+        "gettingResultTimeMillis": "A String",
+        "hasMetrics": True or False,
+        "host": "A String",
+        "index": 42,
+        "launchTime": "A String",
+        "partitionId": 42,
+        "resultFetchStart": "A String",
+        "schedulerDelayMillis": "A String",
+        "speculative": True or False,
+        "stageAttemptId": 42,
+        "stageId": "A String",
+        "status": "A String",
+        "taskId": "A String",
+        "taskLocality": "A String",
+        "taskMetrics": { # Executor Task Metrics
+          "diskBytesSpilled": "A String",
+          "executorCpuTimeNanos": "A String",
+          "executorDeserializeCpuTimeNanos": "A String",
+          "executorDeserializeTimeMillis": "A String",
+          "executorRunTimeMillis": "A String",
+          "inputMetrics": { # Metrics about the input data read by the task.
+            "bytesRead": "A String",
+            "recordsRead": "A String",
+          },
+          "jvmGcTimeMillis": "A String",
+          "memoryBytesSpilled": "A String",
+          "outputMetrics": { # Metrics about the data written by the task.
+            "bytesWritten": "A String",
+            "recordsWritten": "A String",
+          },
+          "peakExecutionMemoryBytes": "A String",
+          "resultSerializationTimeMillis": "A String",
+          "resultSize": "A String",
+          "shuffleReadMetrics": { # Shuffle data read by the task.
+            "fetchWaitTimeMillis": "A String",
+            "localBlocksFetched": "A String",
+            "localBytesRead": "A String",
+            "recordsRead": "A String",
+            "remoteBlocksFetched": "A String",
+            "remoteBytesRead": "A String",
+            "remoteBytesReadToDisk": "A String",
+            "remoteReqsDuration": "A String",
+            "shufflePushReadMetrics": {
+              "corruptMergedBlockChunks": "A String",
+              "localMergedBlocksFetched": "A String",
+              "localMergedBytesRead": "A String",
+              "localMergedChunksFetched": "A String",
+              "mergedFetchFallbackCount": "A String",
+              "remoteMergedBlocksFetched": "A String",
+              "remoteMergedBytesRead": "A String",
+              "remoteMergedChunksFetched": "A String",
+              "remoteMergedReqsDuration": "A String",
+            },
+          },
+          "shuffleWriteMetrics": { # Shuffle data written by task.
+            "bytesWritten": "A String",
+            "recordsWritten": "A String",
+            "writeTimeNanos": "A String",
+          },
+        },
+      },
+    },
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response returned as an acknowledgement of receipt of data.
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/dataproc_v1.projects.locations.sessions.html b/docs/dyn/dataproc_v1.projects.locations.sessions.html index 3f2ef832bc4..8d7b9d0ac7d 100644 --- a/docs/dyn/dataproc_v1.projects.locations.sessions.html +++ b/docs/dyn/dataproc_v1.projects.locations.sessions.html @@ -74,6 +74,11 @@

Cloud Dataproc API . projects . locations . sessions

Instance Methods

+

+ sparkApplications() +

+

Returns the sparkApplications Resource.

+

close()

Close httplib2 connections.

diff --git a/docs/dyn/dataproc_v1.projects.locations.sessions.sparkApplications.html b/docs/dyn/dataproc_v1.projects.locations.sessions.sparkApplications.html new file mode 100644 index 00000000000..f8fe24d416a --- /dev/null +++ b/docs/dyn/dataproc_v1.projects.locations.sessions.sparkApplications.html @@ -0,0 +1,4334 @@ + + + +

Cloud Dataproc API . projects . locations . sessions . sparkApplications

+

Instance Methods

+

+ access(name, parent=None, x__xgafv=None)

+

Obtain high level information corresponding to a single Spark Application.

+

+ accessEnvironmentInfo(name, parent=None, x__xgafv=None)

+

Obtain environment details for a Spark Application

+

+ accessJob(name, jobId=None, parent=None, x__xgafv=None)

+

Obtain data corresponding to a spark job for a Spark Application.

+

+ accessSqlPlan(name, executionId=None, parent=None, x__xgafv=None)

+

Obtain Spark Plan Graph for a Spark Application SQL execution. Limits the number of clusters returned as part of the graph to 10000.

+

+ accessSqlQuery(name, details=None, executionId=None, parent=None, planDescription=None, x__xgafv=None)

+

Obtain data corresponding to a particular SQL Query for a Spark Application.

+

+ accessStageAttempt(name, parent=None, stageAttemptId=None, stageId=None, summaryMetricsMask=None, x__xgafv=None)

+

Obtain data corresponding to a spark stage attempt for a Spark Application.

+

+ accessStageRddGraph(name, parent=None, stageId=None, x__xgafv=None)

+

Obtain RDD operation graph for a Spark Application Stage. Limits the number of clusters returned as part of the graph to 10000.

+

+ close()

+

Close httplib2 connections.

+

+ search(parent, applicationStatus=None, maxEndTime=None, maxTime=None, minEndTime=None, minTime=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Obtain high level information and list of Spark Applications corresponding to a batch

+

+ searchExecutorStageSummary(name, pageSize=None, pageToken=None, parent=None, stageAttemptId=None, stageId=None, x__xgafv=None)

+

Obtain executor summary with respect to a spark stage attempt.

+

+ searchExecutorStageSummary_next()

+

Retrieves the next page of results.

+

+ searchExecutors(name, executorStatus=None, pageSize=None, pageToken=None, parent=None, x__xgafv=None)

+

Obtain data corresponding to executors for a Spark Application.

+

+ searchExecutors_next()

+

Retrieves the next page of results.

+

+ searchJobs(name, jobStatus=None, pageSize=None, pageToken=None, parent=None, x__xgafv=None)

+

Obtain list of spark jobs corresponding to a Spark Application.

+

+ searchJobs_next()

+

Retrieves the next page of results.

+

+ searchSqlQueries(name, details=None, pageSize=None, pageToken=None, parent=None, planDescription=None, x__xgafv=None)

+

Obtain data corresponding to SQL Queries for a Spark Application.

+

+ searchSqlQueries_next()

+

Retrieves the next page of results.

+

+ searchStageAttemptTasks(name, pageSize=None, pageToken=None, parent=None, sortRuntime=None, stageAttemptId=None, stageId=None, taskStatus=None, x__xgafv=None)

+

Obtain data corresponding to tasks for a spark stage attempt for a Spark Application.

+

+ searchStageAttemptTasks_next()

+

Retrieves the next page of results.

+

+ searchStageAttempts(name, pageSize=None, pageToken=None, parent=None, stageId=None, summaryMetricsMask=None, x__xgafv=None)

+

Obtain data corresponding to a spark stage attempts for a Spark Application.

+

+ searchStageAttempts_next()

+

Retrieves the next page of results.

+

+ searchStages(name, pageSize=None, pageToken=None, parent=None, stageStatus=None, summaryMetricsMask=None, x__xgafv=None)

+

Obtain data corresponding to stages for a Spark Application.

+

+ searchStages_next()

+

Retrieves the next page of results.

+

+ search_next()

+

Retrieves the next page of results.

+

+ summarizeExecutors(name, parent=None, x__xgafv=None)

+

Obtain summary of Executor Summary for a Spark Application

+

+ summarizeJobs(name, parent=None, x__xgafv=None)

+

Obtain summary of Jobs for a Spark Application

+

+ summarizeStageAttemptTasks(name, parent=None, stageAttemptId=None, stageId=None, x__xgafv=None)

+

Obtain summary of Tasks for a Spark Application Stage Attempt

+

+ summarizeStages(name, parent=None, x__xgafv=None)

+

Obtain summary of Stages for a Spark Application

+

+ write(name, body=None, x__xgafv=None)

+

Write wrapper objects from dataplane to spanner

+

Method Details

+
+ access(name, parent=None, x__xgafv=None) +
Obtain high level information corresponding to a single Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  parent: string, Required. Parent (Session) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A summary of Spark Application
+  "application": { # High level information corresponding to an application. # Output only. High level information corresponding to an application.
+    "applicationContextIngestionStatus": "A String",
+    "applicationId": "A String",
+    "attempts": [
+      { # Specific attempt of an application.
+        "appSparkVersion": "A String",
+        "attemptId": "A String",
+        "completed": True or False,
+        "durationMillis": "A String",
+        "endTime": "A String",
+        "lastUpdated": "A String",
+        "sparkUser": "A String",
+        "startTime": "A String",
+      },
+    ],
+    "coresGranted": 42,
+    "coresPerExecutor": 42,
+    "maxCores": 42,
+    "memoryPerExecutorMb": 42,
+    "name": "A String",
+    "quantileDataStatus": "A String",
+  },
+}
+
+ +
+ accessEnvironmentInfo(name, parent=None, x__xgafv=None) +
Obtain environment details for a Spark Application
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  parent: string, Required. Parent (Session) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Environment details of a Saprk Application.
+  "applicationEnvironmentInfo": { # Details about the Environment that the application is running in. # Details about the Environment that the application is running in.
+    "classpathEntries": {
+      "a_key": "A String",
+    },
+    "hadoopProperties": {
+      "a_key": "A String",
+    },
+    "metricsProperties": {
+      "a_key": "A String",
+    },
+    "resourceProfiles": [
+      { # Resource profile that contains information about all the resources required by executors and tasks.
+        "executorResources": {
+          "a_key": { # Resources used per executor used by the application.
+            "amount": "A String",
+            "discoveryScript": "A String",
+            "resourceName": "A String",
+            "vendor": "A String",
+          },
+        },
+        "resourceProfileId": 42,
+        "taskResources": {
+          "a_key": { # Resources used per task created by the application.
+            "amount": 3.14,
+            "resourceName": "A String",
+          },
+        },
+      },
+    ],
+    "runtime": {
+      "javaHome": "A String",
+      "javaVersion": "A String",
+      "scalaVersion": "A String",
+    },
+    "sparkProperties": {
+      "a_key": "A String",
+    },
+    "systemProperties": {
+      "a_key": "A String",
+    },
+  },
+}
+
+ +
+ accessJob(name, jobId=None, parent=None, x__xgafv=None) +
Obtain data corresponding to a spark job for a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  jobId: string, Required. Job ID to fetch data for.
+  parent: string, Required. Parent (Session) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Details of a particular job associated with Spark Application
+  "jobData": { # Data corresponding to a spark job. # Output only. Data corresponding to a spark job.
+    "completionTime": "A String",
+    "description": "A String",
+    "jobGroup": "A String",
+    "jobId": "A String",
+    "killTasksSummary": {
+      "a_key": 42,
+    },
+    "name": "A String",
+    "numActiveStages": 42,
+    "numActiveTasks": 42,
+    "numCompletedIndices": 42,
+    "numCompletedStages": 42,
+    "numCompletedTasks": 42,
+    "numFailedStages": 42,
+    "numFailedTasks": 42,
+    "numKilledTasks": 42,
+    "numSkippedStages": 42,
+    "numSkippedTasks": 42,
+    "numTasks": 42,
+    "skippedStages": [
+      42,
+    ],
+    "sqlExecutionId": "A String",
+    "stageIds": [
+      "A String",
+    ],
+    "status": "A String",
+    "submissionTime": "A String",
+  },
+}
+
+ +
+ accessSqlPlan(name, executionId=None, parent=None, x__xgafv=None) +
Obtain Spark Plan Graph for a Spark Application SQL execution. Limits the number of clusters returned as part of the graph to 10000.
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  executionId: string, Required. Execution ID
+  parent: string, Required. Parent (Session) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # SparkPlanGraph for a Spark Application execution limited to maximum 10000 clusters.
+  "sparkPlanGraph": { # A graph used for storing information of an executionPlan of DataFrame. # SparkPlanGraph for a Spark Application execution.
+    "edges": [
+      { # Represents a directed edge in the spark plan tree from child to parent.
+        "fromId": "A String",
+        "toId": "A String",
+      },
+    ],
+    "executionId": "A String",
+    "nodes": [
+      { # Wrapper user to represent either a node or a cluster.
+        "cluster": { # Represents a tree of spark plan.
+          "desc": "A String",
+          "metrics": [
+            { # Metrics related to SQL execution.
+              "accumulatorId": "A String",
+              "metricType": "A String",
+              "name": "A String",
+            },
+          ],
+          "name": "A String",
+          "nodes": [
+            # Object with schema name: SparkPlanGraphNodeWrapper
+          ],
+          "sparkPlanGraphClusterId": "A String",
+        },
+        "node": { # Represents a node in the spark plan tree.
+          "desc": "A String",
+          "metrics": [
+            { # Metrics related to SQL execution.
+              "accumulatorId": "A String",
+              "metricType": "A String",
+              "name": "A String",
+            },
+          ],
+          "name": "A String",
+          "sparkPlanGraphNodeId": "A String",
+        },
+      },
+    ],
+  },
+}
+
+ +
+ accessSqlQuery(name, details=None, executionId=None, parent=None, planDescription=None, x__xgafv=None) +
Obtain data corresponding to a particular SQL Query for a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  details: boolean, Optional. Lists/ hides details of Spark plan nodes. True is set to list and false to hide.
+  executionId: string, Required. Execution ID
+  parent: string, Required. Parent (Session) resource reference.
+  planDescription: boolean, Optional. Enables/ disables physical plan description on demand
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Details of a query for a Spark Application
+  "executionData": { # SQL Execution Data # SQL Execution Data
+    "completionTime": "A String",
+    "description": "A String",
+    "details": "A String",
+    "errorMessage": "A String",
+    "executionId": "A String",
+    "jobs": {
+      "a_key": "A String",
+    },
+    "metricValues": {
+      "a_key": "A String",
+    },
+    "metricValuesIsNull": True or False,
+    "metrics": [
+      { # Metrics related to SQL execution.
+        "accumulatorId": "A String",
+        "metricType": "A String",
+        "name": "A String",
+      },
+    ],
+    "modifiedConfigs": {
+      "a_key": "A String",
+    },
+    "physicalPlanDescription": "A String",
+    "rootExecutionId": "A String",
+    "stages": [
+      "A String",
+    ],
+    "submissionTime": "A String",
+  },
+}
+
+ +
+ accessStageAttempt(name, parent=None, stageAttemptId=None, stageId=None, summaryMetricsMask=None, x__xgafv=None) +
Obtain data corresponding to a spark stage attempt for a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  parent: string, Required. Parent (Session) resource reference.
+  stageAttemptId: integer, Required. Stage Attempt ID
+  stageId: string, Required. Stage ID
+  summaryMetricsMask: string, Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Stage Attempt for a Stage of a Spark Application
+  "stageData": { # Data corresponding to a stage. # Output only. Data corresponding to a stage.
+    "accumulatorUpdates": [
+      {
+        "accumullableInfoId": "A String",
+        "name": "A String",
+        "update": "A String",
+        "value": "A String",
+      },
+    ],
+    "completionTime": "A String",
+    "description": "A String",
+    "details": "A String",
+    "executorMetricsDistributions": {
+      "diskBytesSpilled": [
+        3.14,
+      ],
+      "failedTasks": [
+        3.14,
+      ],
+      "inputBytes": [
+        3.14,
+      ],
+      "inputRecords": [
+        3.14,
+      ],
+      "killedTasks": [
+        3.14,
+      ],
+      "memoryBytesSpilled": [
+        3.14,
+      ],
+      "outputBytes": [
+        3.14,
+      ],
+      "outputRecords": [
+        3.14,
+      ],
+      "peakMemoryMetrics": {
+        "executorMetrics": [
+          {
+            "metrics": {
+              "a_key": "A String",
+            },
+          },
+        ],
+        "quantiles": [
+          3.14,
+        ],
+      },
+      "quantiles": [
+        3.14,
+      ],
+      "shuffleRead": [
+        3.14,
+      ],
+      "shuffleReadRecords": [
+        3.14,
+      ],
+      "shuffleWrite": [
+        3.14,
+      ],
+      "shuffleWriteRecords": [
+        3.14,
+      ],
+      "succeededTasks": [
+        3.14,
+      ],
+      "taskTimeMillis": [
+        3.14,
+      ],
+    },
+    "executorSummary": {
+      "a_key": { # Executor resources consumed by a stage.
+        "diskBytesSpilled": "A String",
+        "executorId": "A String",
+        "failedTasks": 42,
+        "inputBytes": "A String",
+        "inputRecords": "A String",
+        "isExcludedForStage": True or False,
+        "killedTasks": 42,
+        "memoryBytesSpilled": "A String",
+        "outputBytes": "A String",
+        "outputRecords": "A String",
+        "peakMemoryMetrics": {
+          "metrics": {
+            "a_key": "A String",
+          },
+        },
+        "shuffleRead": "A String",
+        "shuffleReadRecords": "A String",
+        "shuffleWrite": "A String",
+        "shuffleWriteRecords": "A String",
+        "stageAttemptId": 42,
+        "stageId": "A String",
+        "succeededTasks": 42,
+        "taskTimeMillis": "A String",
+      },
+    },
+    "failureReason": "A String",
+    "firstTaskLaunchedTime": "A String",
+    "isShufflePushEnabled": True or False,
+    "jobIds": [
+      "A String",
+    ],
+    "killedTasksSummary": {
+      "a_key": 42,
+    },
+    "locality": {
+      "a_key": "A String",
+    },
+    "name": "A String",
+    "numActiveTasks": 42,
+    "numCompleteTasks": 42,
+    "numCompletedIndices": 42,
+    "numFailedTasks": 42,
+    "numKilledTasks": 42,
+    "numTasks": 42,
+    "parentStageIds": [
+      "A String",
+    ],
+    "peakExecutorMetrics": {
+      "metrics": {
+        "a_key": "A String",
+      },
+    },
+    "rddIds": [
+      "A String",
+    ],
+    "resourceProfileId": 42,
+    "schedulingPool": "A String",
+    "shuffleMergersCount": 42,
+    "speculationSummary": { # Details of the speculation task when speculative execution is enabled.
+      "numActiveTasks": 42,
+      "numCompletedTasks": 42,
+      "numFailedTasks": 42,
+      "numKilledTasks": 42,
+      "numTasks": 42,
+      "stageAttemptId": 42,
+      "stageId": "A String",
+    },
+    "stageAttemptId": 42,
+    "stageId": "A String",
+    "stageMetrics": { # Stage Level Aggregated Metrics
+      "diskBytesSpilled": "A String",
+      "executorCpuTimeNanos": "A String",
+      "executorDeserializeCpuTimeNanos": "A String",
+      "executorDeserializeTimeMillis": "A String",
+      "executorRunTimeMillis": "A String",
+      "jvmGcTimeMillis": "A String",
+      "memoryBytesSpilled": "A String",
+      "peakExecutionMemoryBytes": "A String",
+      "resultSerializationTimeMillis": "A String",
+      "resultSize": "A String",
+      "stageInputMetrics": { # Metrics about the input read by the stage.
+        "bytesRead": "A String",
+        "recordsRead": "A String",
+      },
+      "stageOutputMetrics": { # Metrics about the output written by the stage.
+        "bytesWritten": "A String",
+        "recordsWritten": "A String",
+      },
+      "stageShuffleReadMetrics": { # Shuffle data read for the stage.
+        "bytesRead": "A String",
+        "fetchWaitTimeMillis": "A String",
+        "localBlocksFetched": "A String",
+        "localBytesRead": "A String",
+        "recordsRead": "A String",
+        "remoteBlocksFetched": "A String",
+        "remoteBytesRead": "A String",
+        "remoteBytesReadToDisk": "A String",
+        "remoteReqsDuration": "A String",
+        "stageShufflePushReadMetrics": {
+          "corruptMergedBlockChunks": "A String",
+          "localMergedBlocksFetched": "A String",
+          "localMergedBytesRead": "A String",
+          "localMergedChunksFetched": "A String",
+          "mergedFetchFallbackCount": "A String",
+          "remoteMergedBlocksFetched": "A String",
+          "remoteMergedBytesRead": "A String",
+          "remoteMergedChunksFetched": "A String",
+          "remoteMergedReqsDuration": "A String",
+        },
+      },
+      "stageShuffleWriteMetrics": { # Shuffle data written for the stage.
+        "bytesWritten": "A String",
+        "recordsWritten": "A String",
+        "writeTimeNanos": "A String",
+      },
+    },
+    "status": "A String",
+    "submissionTime": "A String",
+    "taskQuantileMetrics": { # Summary metrics fields. These are included in response only if present in summary_metrics_mask field in request
+      "diskBytesSpilled": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "durationMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "executorCpuTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "executorDeserializeCpuTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "executorDeserializeTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "executorRunTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "gettingResultTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "inputMetrics": {
+        "bytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "recordsRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+      },
+      "jvmGcTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "memoryBytesSpilled": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "outputMetrics": {
+        "bytesWritten": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "recordsWritten": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+      },
+      "peakExecutionMemoryBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "resultSerializationTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "resultSize": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "schedulerDelayMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+        "count": "A String",
+        "maximum": "A String",
+        "minimum": "A String",
+        "percentile25": "A String",
+        "percentile50": "A String",
+        "percentile75": "A String",
+        "sum": "A String",
+      },
+      "shuffleReadMetrics": {
+        "fetchWaitTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "localBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "readBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "readRecords": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "remoteBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "remoteBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "remoteBytesReadToDisk": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "remoteReqsDuration": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "shufflePushReadMetrics": {
+          "corruptMergedBlockChunks": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "localMergedBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "localMergedBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "localMergedChunksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "mergedFetchFallbackCount": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteMergedBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteMergedBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteMergedChunksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteMergedReqsDuration": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+        "totalBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+      },
+      "shuffleWriteMetrics": {
+        "writeBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "writeRecords": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "writeTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+      },
+    },
+    "tasks": {
+      "a_key": { # Data corresponding to tasks created by spark.
+        "accumulatorUpdates": [
+          {
+            "accumullableInfoId": "A String",
+            "name": "A String",
+            "update": "A String",
+            "value": "A String",
+          },
+        ],
+        "attempt": 42,
+        "durationMillis": "A String",
+        "errorMessage": "A String",
+        "executorId": "A String",
+        "executorLogs": {
+          "a_key": "A String",
+        },
+        "gettingResultTimeMillis": "A String",
+        "hasMetrics": True or False,
+        "host": "A String",
+        "index": 42,
+        "launchTime": "A String",
+        "partitionId": 42,
+        "resultFetchStart": "A String",
+        "schedulerDelayMillis": "A String",
+        "speculative": True or False,
+        "stageAttemptId": 42,
+        "stageId": "A String",
+        "status": "A String",
+        "taskId": "A String",
+        "taskLocality": "A String",
+        "taskMetrics": { # Executor Task Metrics
+          "diskBytesSpilled": "A String",
+          "executorCpuTimeNanos": "A String",
+          "executorDeserializeCpuTimeNanos": "A String",
+          "executorDeserializeTimeMillis": "A String",
+          "executorRunTimeMillis": "A String",
+          "inputMetrics": { # Metrics about the input data read by the task.
+            "bytesRead": "A String",
+            "recordsRead": "A String",
+          },
+          "jvmGcTimeMillis": "A String",
+          "memoryBytesSpilled": "A String",
+          "outputMetrics": { # Metrics about the data written by the task.
+            "bytesWritten": "A String",
+            "recordsWritten": "A String",
+          },
+          "peakExecutionMemoryBytes": "A String",
+          "resultSerializationTimeMillis": "A String",
+          "resultSize": "A String",
+          "shuffleReadMetrics": { # Shuffle data read by the task.
+            "fetchWaitTimeMillis": "A String",
+            "localBlocksFetched": "A String",
+            "localBytesRead": "A String",
+            "recordsRead": "A String",
+            "remoteBlocksFetched": "A String",
+            "remoteBytesRead": "A String",
+            "remoteBytesReadToDisk": "A String",
+            "remoteReqsDuration": "A String",
+            "shufflePushReadMetrics": {
+              "corruptMergedBlockChunks": "A String",
+              "localMergedBlocksFetched": "A String",
+              "localMergedBytesRead": "A String",
+              "localMergedChunksFetched": "A String",
+              "mergedFetchFallbackCount": "A String",
+              "remoteMergedBlocksFetched": "A String",
+              "remoteMergedBytesRead": "A String",
+              "remoteMergedChunksFetched": "A String",
+              "remoteMergedReqsDuration": "A String",
+            },
+          },
+          "shuffleWriteMetrics": { # Shuffle data written by task.
+            "bytesWritten": "A String",
+            "recordsWritten": "A String",
+            "writeTimeNanos": "A String",
+          },
+        },
+      },
+    },
+  },
+}
+
+ +
+ accessStageRddGraph(name, parent=None, stageId=None, x__xgafv=None) +
Obtain RDD operation graph for a Spark Application Stage. Limits the number of clusters returned as part of the graph to 10000.
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  parent: string, Required. Parent (Session) resource reference.
+  stageId: string, Required. Stage ID
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # RDD operation graph for a Spark Application Stage limited to maximum 10000 clusters.
+  "rddOperationGraph": { # Graph representing RDD dependencies. Consists of edges and a root cluster. # RDD operation graph for a Spark Application Stage.
+    "edges": [
+      { # A directed edge representing dependency between two RDDs.
+        "fromId": 42,
+        "toId": 42,
+      },
+    ],
+    "incomingEdges": [
+      { # A directed edge representing dependency between two RDDs.
+        "fromId": 42,
+        "toId": 42,
+      },
+    ],
+    "outgoingEdges": [
+      { # A directed edge representing dependency between two RDDs.
+        "fromId": 42,
+        "toId": 42,
+      },
+    ],
+    "rootCluster": { # A grouping of nodes representing higher level constructs (stage, job etc.).
+      "childClusters": [
+        # Object with schema name: RddOperationCluster
+      ],
+      "childNodes": [
+        { # A node in the RDD operation graph. Corresponds to a single RDD.
+          "barrier": True or False,
+          "cached": True or False,
+          "callsite": "A String",
+          "name": "A String",
+          "nodeId": 42,
+          "outputDeterministicLevel": "A String",
+        },
+      ],
+      "name": "A String",
+      "rddClusterId": "A String",
+    },
+    "stageId": "A String",
+  },
+}
+
+ +
+ close() +
Close httplib2 connections.
+
+ +
+ search(parent, applicationStatus=None, maxEndTime=None, maxTime=None, minEndTime=None, minTime=None, pageSize=None, pageToken=None, x__xgafv=None) +
Obtain high level information and list of Spark Applications corresponding to a batch
+
+Args:
+  parent: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID" (required)
+  applicationStatus: string, Optional. Search only applications in the chosen state.
+    Allowed values
+      APPLICATION_STATUS_UNSPECIFIED - 
+      APPLICATION_STATUS_RUNNING - 
+      APPLICATION_STATUS_COMPLETED - 
+  maxEndTime: string, Optional. Latest end timestamp to list.
+  maxTime: string, Optional. Latest start timestamp to list.
+  minEndTime: string, Optional. Earliest end timestamp to list.
+  minTime: string, Optional. Earliest start timestamp to list.
+  pageSize: integer, Optional. Maximum number of applications to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
+  pageToken: string, Optional. A page token received from a previous SearchSessionSparkApplications call. Provide this token to retrieve the subsequent page.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A list of summary of Spark Applications
+  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationsRequest.
+  "sparkApplications": [ # Output only. High level information corresponding to an application.
+    { # A summary of Spark Application
+      "application": { # High level information corresponding to an application. # Output only. High level information corresponding to an application.
+        "applicationContextIngestionStatus": "A String",
+        "applicationId": "A String",
+        "attempts": [
+          { # Specific attempt of an application.
+            "appSparkVersion": "A String",
+            "attemptId": "A String",
+            "completed": True or False,
+            "durationMillis": "A String",
+            "endTime": "A String",
+            "lastUpdated": "A String",
+            "sparkUser": "A String",
+            "startTime": "A String",
+          },
+        ],
+        "coresGranted": 42,
+        "coresPerExecutor": 42,
+        "maxCores": 42,
+        "memoryPerExecutorMb": 42,
+        "name": "A String",
+        "quantileDataStatus": "A String",
+      },
+      "name": "A String", # Identifier. Name of the spark application
+    },
+  ],
+}
+
+ +
+ searchExecutorStageSummary(name, pageSize=None, pageToken=None, parent=None, stageAttemptId=None, stageId=None, x__xgafv=None) +
Obtain executor summary with respect to a spark stage attempt.
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  pageSize: integer, Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
+  pageToken: string, Optional. A page token received from a previous SearchSessionSparkApplicationExecutorStageSummary call. Provide this token to retrieve the subsequent page.
+  parent: string, Required. Parent (Session) resource reference.
+  stageAttemptId: integer, Required. Stage Attempt ID
+  stageId: string, Required. Stage ID
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # List of Executors associated with a Spark Application Stage.
+  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationExecutorStageSummaryRequest.
+  "sparkApplicationStageExecutors": [ # Details about executors used by the application stage.
+    { # Executor resources consumed by a stage.
+      "diskBytesSpilled": "A String",
+      "executorId": "A String",
+      "failedTasks": 42,
+      "inputBytes": "A String",
+      "inputRecords": "A String",
+      "isExcludedForStage": True or False,
+      "killedTasks": 42,
+      "memoryBytesSpilled": "A String",
+      "outputBytes": "A String",
+      "outputRecords": "A String",
+      "peakMemoryMetrics": {
+        "metrics": {
+          "a_key": "A String",
+        },
+      },
+      "shuffleRead": "A String",
+      "shuffleReadRecords": "A String",
+      "shuffleWrite": "A String",
+      "shuffleWriteRecords": "A String",
+      "stageAttemptId": 42,
+      "stageId": "A String",
+      "succeededTasks": 42,
+      "taskTimeMillis": "A String",
+    },
+  ],
+}
+
+ +
+ searchExecutorStageSummary_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ searchExecutors(name, executorStatus=None, pageSize=None, pageToken=None, parent=None, x__xgafv=None) +
Obtain data corresponding to executors for a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  executorStatus: string, Optional. Filter to select whether active/ dead or all executors should be selected.
+    Allowed values
+      EXECUTOR_STATUS_UNSPECIFIED - 
+      EXECUTOR_STATUS_ACTIVE - 
+      EXECUTOR_STATUS_DEAD - 
+  pageSize: integer, Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
+  pageToken: string, Optional. A page token received from a previous SearchSessionSparkApplicationExecutors call. Provide this token to retrieve the subsequent page.
+  parent: string, Required. Parent (Session) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # List of Executors associated with a Spark Application.
+  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationExecutorsRequest.
+  "sparkApplicationExecutors": [ # Details about executors used by the application.
+    { # Details about executors used by the application.
+      "activeTasks": 42,
+      "addTime": "A String",
+      "attributes": {
+        "a_key": "A String",
+      },
+      "completedTasks": 42,
+      "diskUsed": "A String",
+      "excludedInStages": [
+        "A String",
+      ],
+      "executorId": "A String",
+      "executorLogs": {
+        "a_key": "A String",
+      },
+      "failedTasks": 42,
+      "hostPort": "A String",
+      "isActive": True or False,
+      "isExcluded": True or False,
+      "maxMemory": "A String",
+      "maxTasks": 42,
+      "memoryMetrics": {
+        "totalOffHeapStorageMemory": "A String",
+        "totalOnHeapStorageMemory": "A String",
+        "usedOffHeapStorageMemory": "A String",
+        "usedOnHeapStorageMemory": "A String",
+      },
+      "memoryUsed": "A String",
+      "peakMemoryMetrics": {
+        "metrics": {
+          "a_key": "A String",
+        },
+      },
+      "rddBlocks": 42,
+      "removeReason": "A String",
+      "removeTime": "A String",
+      "resourceProfileId": 42,
+      "resources": {
+        "a_key": {
+          "addresses": [
+            "A String",
+          ],
+          "name": "A String",
+        },
+      },
+      "totalCores": 42,
+      "totalDurationMillis": "A String",
+      "totalGcTimeMillis": "A String",
+      "totalInputBytes": "A String",
+      "totalShuffleRead": "A String",
+      "totalShuffleWrite": "A String",
+      "totalTasks": 42,
+    },
+  ],
+}
+
+ +
+ searchExecutors_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ searchJobs(name, jobStatus=None, pageSize=None, pageToken=None, parent=None, x__xgafv=None) +
Obtain a list of Spark jobs corresponding to a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  jobStatus: string, Optional. List only jobs in the specific state.
+    Allowed values
+      JOB_EXECUTION_STATUS_UNSPECIFIED - 
+      JOB_EXECUTION_STATUS_RUNNING - 
+      JOB_EXECUTION_STATUS_SUCCEEDED - 
+      JOB_EXECUTION_STATUS_FAILED - 
+      JOB_EXECUTION_STATUS_UNKNOWN - 
+  pageSize: integer, Optional. Maximum number of jobs to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
+  pageToken: string, Optional. A page token received from a previous SearchSessionSparkApplicationJobs call. Provide this token to retrieve the subsequent page.
+  parent: string, Required. Parent (Session) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A list of Jobs associated with a Spark Application.
+  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationJobsRequest.
+  "sparkApplicationJobs": [ # Output only. Data corresponding to a spark job.
+    { # Data corresponding to a spark job.
+      "completionTime": "A String",
+      "description": "A String",
+      "jobGroup": "A String",
+      "jobId": "A String",
+      "killTasksSummary": {
+        "a_key": 42,
+      },
+      "name": "A String",
+      "numActiveStages": 42,
+      "numActiveTasks": 42,
+      "numCompletedIndices": 42,
+      "numCompletedStages": 42,
+      "numCompletedTasks": 42,
+      "numFailedStages": 42,
+      "numFailedTasks": 42,
+      "numKilledTasks": 42,
+      "numSkippedStages": 42,
+      "numSkippedTasks": 42,
+      "numTasks": 42,
+      "skippedStages": [
+        42,
+      ],
+      "sqlExecutionId": "A String",
+      "stageIds": [
+        "A String",
+      ],
+      "status": "A String",
+      "submissionTime": "A String",
+    },
+  ],
+}
+
+ +
+ searchJobs_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ searchSqlQueries(name, details=None, pageSize=None, pageToken=None, parent=None, planDescription=None, x__xgafv=None) +
Obtain data corresponding to SQL Queries for a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  details: boolean, Optional. Lists/hides details of Spark plan nodes. Set to true to list and false to hide.
+  pageSize: integer, Optional. Maximum number of queries to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
+  pageToken: string, Optional. A page token received from a previous SearchSessionSparkApplicationSqlQueries call. Provide this token to retrieve the subsequent page.
+  parent: string, Required. Parent (Session) resource reference.
+  planDescription: boolean, Optional. Enables/disables physical plan description on demand.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # List of all queries for a Spark Application.
+  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationSqlQueriesRequest.
+  "sparkApplicationSqlQueries": [ # Output only. SQL Execution Data
+    { # SQL Execution Data
+      "completionTime": "A String",
+      "description": "A String",
+      "details": "A String",
+      "errorMessage": "A String",
+      "executionId": "A String",
+      "jobs": {
+        "a_key": "A String",
+      },
+      "metricValues": {
+        "a_key": "A String",
+      },
+      "metricValuesIsNull": True or False,
+      "metrics": [
+        { # Metrics related to SQL execution.
+          "accumulatorId": "A String",
+          "metricType": "A String",
+          "name": "A String",
+        },
+      ],
+      "modifiedConfigs": {
+        "a_key": "A String",
+      },
+      "physicalPlanDescription": "A String",
+      "rootExecutionId": "A String",
+      "stages": [
+        "A String",
+      ],
+      "submissionTime": "A String",
+    },
+  ],
+}
+
+ +
+ searchSqlQueries_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ searchStageAttemptTasks(name, pageSize=None, pageToken=None, parent=None, sortRuntime=None, stageAttemptId=None, stageId=None, taskStatus=None, x__xgafv=None) +
Obtain data corresponding to tasks for a spark stage attempt for a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  pageSize: integer, Optional. Maximum number of tasks to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
+  pageToken: string, Optional. A page token received from a previous SearchSessionSparkApplicationStageAttemptTasks call. Provide this token to retrieve the subsequent page.
+  parent: string, Required. Parent (Session) resource reference.
+  sortRuntime: boolean, Optional. Sort the tasks by runtime.
+  stageAttemptId: integer, Optional. Stage Attempt ID
+  stageId: string, Optional. Stage ID
+  taskStatus: string, Optional. List only tasks in the state.
+    Allowed values
+      TASK_STATUS_UNSPECIFIED - 
+      TASK_STATUS_RUNNING - 
+      TASK_STATUS_SUCCESS - 
+      TASK_STATUS_FAILED - 
+      TASK_STATUS_KILLED - 
+      TASK_STATUS_PENDING - 
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # List of tasks for a stage of a Spark Application
+  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationStageAttemptTasksRequest.
+  "sparkApplicationStageAttemptTasks": [ # Output only. Data corresponding to tasks created by spark.
+    { # Data corresponding to tasks created by spark.
+      "accumulatorUpdates": [
+        {
+          "accumullableInfoId": "A String",
+          "name": "A String",
+          "update": "A String",
+          "value": "A String",
+        },
+      ],
+      "attempt": 42,
+      "durationMillis": "A String",
+      "errorMessage": "A String",
+      "executorId": "A String",
+      "executorLogs": {
+        "a_key": "A String",
+      },
+      "gettingResultTimeMillis": "A String",
+      "hasMetrics": True or False,
+      "host": "A String",
+      "index": 42,
+      "launchTime": "A String",
+      "partitionId": 42,
+      "resultFetchStart": "A String",
+      "schedulerDelayMillis": "A String",
+      "speculative": True or False,
+      "stageAttemptId": 42,
+      "stageId": "A String",
+      "status": "A String",
+      "taskId": "A String",
+      "taskLocality": "A String",
+      "taskMetrics": { # Executor Task Metrics
+        "diskBytesSpilled": "A String",
+        "executorCpuTimeNanos": "A String",
+        "executorDeserializeCpuTimeNanos": "A String",
+        "executorDeserializeTimeMillis": "A String",
+        "executorRunTimeMillis": "A String",
+        "inputMetrics": { # Metrics about the input data read by the task.
+          "bytesRead": "A String",
+          "recordsRead": "A String",
+        },
+        "jvmGcTimeMillis": "A String",
+        "memoryBytesSpilled": "A String",
+        "outputMetrics": { # Metrics about the data written by the task.
+          "bytesWritten": "A String",
+          "recordsWritten": "A String",
+        },
+        "peakExecutionMemoryBytes": "A String",
+        "resultSerializationTimeMillis": "A String",
+        "resultSize": "A String",
+        "shuffleReadMetrics": { # Shuffle data read by the task.
+          "fetchWaitTimeMillis": "A String",
+          "localBlocksFetched": "A String",
+          "localBytesRead": "A String",
+          "recordsRead": "A String",
+          "remoteBlocksFetched": "A String",
+          "remoteBytesRead": "A String",
+          "remoteBytesReadToDisk": "A String",
+          "remoteReqsDuration": "A String",
+          "shufflePushReadMetrics": {
+            "corruptMergedBlockChunks": "A String",
+            "localMergedBlocksFetched": "A String",
+            "localMergedBytesRead": "A String",
+            "localMergedChunksFetched": "A String",
+            "mergedFetchFallbackCount": "A String",
+            "remoteMergedBlocksFetched": "A String",
+            "remoteMergedBytesRead": "A String",
+            "remoteMergedChunksFetched": "A String",
+            "remoteMergedReqsDuration": "A String",
+          },
+        },
+        "shuffleWriteMetrics": { # Shuffle data written by task.
+          "bytesWritten": "A String",
+          "recordsWritten": "A String",
+          "writeTimeNanos": "A String",
+        },
+      },
+    },
+  ],
+}
+
+ +
+ searchStageAttemptTasks_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ searchStageAttempts(name, pageSize=None, pageToken=None, parent=None, stageId=None, summaryMetricsMask=None, x__xgafv=None) +
Obtain data corresponding to Spark stage attempts for a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  pageSize: integer, Optional. Maximum number of stage attempts (paging based on stage_attempt_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
+  pageToken: string, Optional. A page token received from a previous SearchSessionSparkApplicationStageAttempts call. Provide this token to retrieve the subsequent page.
+  parent: string, Required. Parent (Session) resource reference.
+  stageId: string, Required. Stage ID for which attempts are to be fetched
+  summaryMetricsMask: string, Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A list of Stage Attempts for a Stage of a Spark Application.
+  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationStageAttemptsRequest.
+  "sparkApplicationStageAttempts": [ # Output only. Data corresponding to a stage attempts
+    { # Data corresponding to a stage.
+      "accumulatorUpdates": [
+        {
+          "accumullableInfoId": "A String",
+          "name": "A String",
+          "update": "A String",
+          "value": "A String",
+        },
+      ],
+      "completionTime": "A String",
+      "description": "A String",
+      "details": "A String",
+      "executorMetricsDistributions": {
+        "diskBytesSpilled": [
+          3.14,
+        ],
+        "failedTasks": [
+          3.14,
+        ],
+        "inputBytes": [
+          3.14,
+        ],
+        "inputRecords": [
+          3.14,
+        ],
+        "killedTasks": [
+          3.14,
+        ],
+        "memoryBytesSpilled": [
+          3.14,
+        ],
+        "outputBytes": [
+          3.14,
+        ],
+        "outputRecords": [
+          3.14,
+        ],
+        "peakMemoryMetrics": {
+          "executorMetrics": [
+            {
+              "metrics": {
+                "a_key": "A String",
+              },
+            },
+          ],
+          "quantiles": [
+            3.14,
+          ],
+        },
+        "quantiles": [
+          3.14,
+        ],
+        "shuffleRead": [
+          3.14,
+        ],
+        "shuffleReadRecords": [
+          3.14,
+        ],
+        "shuffleWrite": [
+          3.14,
+        ],
+        "shuffleWriteRecords": [
+          3.14,
+        ],
+        "succeededTasks": [
+          3.14,
+        ],
+        "taskTimeMillis": [
+          3.14,
+        ],
+      },
+      "executorSummary": {
+        "a_key": { # Executor resources consumed by a stage.
+          "diskBytesSpilled": "A String",
+          "executorId": "A String",
+          "failedTasks": 42,
+          "inputBytes": "A String",
+          "inputRecords": "A String",
+          "isExcludedForStage": True or False,
+          "killedTasks": 42,
+          "memoryBytesSpilled": "A String",
+          "outputBytes": "A String",
+          "outputRecords": "A String",
+          "peakMemoryMetrics": {
+            "metrics": {
+              "a_key": "A String",
+            },
+          },
+          "shuffleRead": "A String",
+          "shuffleReadRecords": "A String",
+          "shuffleWrite": "A String",
+          "shuffleWriteRecords": "A String",
+          "stageAttemptId": 42,
+          "stageId": "A String",
+          "succeededTasks": 42,
+          "taskTimeMillis": "A String",
+        },
+      },
+      "failureReason": "A String",
+      "firstTaskLaunchedTime": "A String",
+      "isShufflePushEnabled": True or False,
+      "jobIds": [
+        "A String",
+      ],
+      "killedTasksSummary": {
+        "a_key": 42,
+      },
+      "locality": {
+        "a_key": "A String",
+      },
+      "name": "A String",
+      "numActiveTasks": 42,
+      "numCompleteTasks": 42,
+      "numCompletedIndices": 42,
+      "numFailedTasks": 42,
+      "numKilledTasks": 42,
+      "numTasks": 42,
+      "parentStageIds": [
+        "A String",
+      ],
+      "peakExecutorMetrics": {
+        "metrics": {
+          "a_key": "A String",
+        },
+      },
+      "rddIds": [
+        "A String",
+      ],
+      "resourceProfileId": 42,
+      "schedulingPool": "A String",
+      "shuffleMergersCount": 42,
+      "speculationSummary": { # Details of the speculation task when speculative execution is enabled.
+        "numActiveTasks": 42,
+        "numCompletedTasks": 42,
+        "numFailedTasks": 42,
+        "numKilledTasks": 42,
+        "numTasks": 42,
+        "stageAttemptId": 42,
+        "stageId": "A String",
+      },
+      "stageAttemptId": 42,
+      "stageId": "A String",
+      "stageMetrics": { # Stage Level Aggregated Metrics
+        "diskBytesSpilled": "A String",
+        "executorCpuTimeNanos": "A String",
+        "executorDeserializeCpuTimeNanos": "A String",
+        "executorDeserializeTimeMillis": "A String",
+        "executorRunTimeMillis": "A String",
+        "jvmGcTimeMillis": "A String",
+        "memoryBytesSpilled": "A String",
+        "peakExecutionMemoryBytes": "A String",
+        "resultSerializationTimeMillis": "A String",
+        "resultSize": "A String",
+        "stageInputMetrics": { # Metrics about the input read by the stage.
+          "bytesRead": "A String",
+          "recordsRead": "A String",
+        },
+        "stageOutputMetrics": { # Metrics about the output written by the stage.
+          "bytesWritten": "A String",
+          "recordsWritten": "A String",
+        },
+        "stageShuffleReadMetrics": { # Shuffle data read for the stage.
+          "bytesRead": "A String",
+          "fetchWaitTimeMillis": "A String",
+          "localBlocksFetched": "A String",
+          "localBytesRead": "A String",
+          "recordsRead": "A String",
+          "remoteBlocksFetched": "A String",
+          "remoteBytesRead": "A String",
+          "remoteBytesReadToDisk": "A String",
+          "remoteReqsDuration": "A String",
+          "stageShufflePushReadMetrics": {
+            "corruptMergedBlockChunks": "A String",
+            "localMergedBlocksFetched": "A String",
+            "localMergedBytesRead": "A String",
+            "localMergedChunksFetched": "A String",
+            "mergedFetchFallbackCount": "A String",
+            "remoteMergedBlocksFetched": "A String",
+            "remoteMergedBytesRead": "A String",
+            "remoteMergedChunksFetched": "A String",
+            "remoteMergedReqsDuration": "A String",
+          },
+        },
+        "stageShuffleWriteMetrics": { # Shuffle data written for the stage.
+          "bytesWritten": "A String",
+          "recordsWritten": "A String",
+          "writeTimeNanos": "A String",
+        },
+      },
+      "status": "A String",
+      "submissionTime": "A String",
+      "taskQuantileMetrics": { # Summary metrics fields. These are included in response only if present in summary_metrics_mask field in request
+        "diskBytesSpilled": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "durationMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "executorCpuTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "executorDeserializeCpuTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "executorDeserializeTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "executorRunTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "gettingResultTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "inputMetrics": {
+          "bytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "recordsRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+        "jvmGcTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "memoryBytesSpilled": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "outputMetrics": {
+          "bytesWritten": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "recordsWritten": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+        "peakExecutionMemoryBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "resultSerializationTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "resultSize": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "schedulerDelayMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "shuffleReadMetrics": {
+          "fetchWaitTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "localBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "readBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "readRecords": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteBytesReadToDisk": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteReqsDuration": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "shufflePushReadMetrics": {
+            "corruptMergedBlockChunks": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "localMergedBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "localMergedBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "localMergedChunksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "mergedFetchFallbackCount": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteMergedBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteMergedBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteMergedChunksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteMergedReqsDuration": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+          },
+          "totalBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+        "shuffleWriteMetrics": {
+          "writeBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "writeRecords": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "writeTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+      },
+      "tasks": {
+        "a_key": { # Data corresponding to tasks created by spark.
+          "accumulatorUpdates": [
+            {
+              "accumullableInfoId": "A String",
+              "name": "A String",
+              "update": "A String",
+              "value": "A String",
+            },
+          ],
+          "attempt": 42,
+          "durationMillis": "A String",
+          "errorMessage": "A String",
+          "executorId": "A String",
+          "executorLogs": {
+            "a_key": "A String",
+          },
+          "gettingResultTimeMillis": "A String",
+          "hasMetrics": True or False,
+          "host": "A String",
+          "index": 42,
+          "launchTime": "A String",
+          "partitionId": 42,
+          "resultFetchStart": "A String",
+          "schedulerDelayMillis": "A String",
+          "speculative": True or False,
+          "stageAttemptId": 42,
+          "stageId": "A String",
+          "status": "A String",
+          "taskId": "A String",
+          "taskLocality": "A String",
+          "taskMetrics": { # Executor Task Metrics
+            "diskBytesSpilled": "A String",
+            "executorCpuTimeNanos": "A String",
+            "executorDeserializeCpuTimeNanos": "A String",
+            "executorDeserializeTimeMillis": "A String",
+            "executorRunTimeMillis": "A String",
+            "inputMetrics": { # Metrics about the input data read by the task.
+              "bytesRead": "A String",
+              "recordsRead": "A String",
+            },
+            "jvmGcTimeMillis": "A String",
+            "memoryBytesSpilled": "A String",
+            "outputMetrics": { # Metrics about the data written by the task.
+              "bytesWritten": "A String",
+              "recordsWritten": "A String",
+            },
+            "peakExecutionMemoryBytes": "A String",
+            "resultSerializationTimeMillis": "A String",
+            "resultSize": "A String",
+            "shuffleReadMetrics": { # Shuffle data read by the task.
+              "fetchWaitTimeMillis": "A String",
+              "localBlocksFetched": "A String",
+              "localBytesRead": "A String",
+              "recordsRead": "A String",
+              "remoteBlocksFetched": "A String",
+              "remoteBytesRead": "A String",
+              "remoteBytesReadToDisk": "A String",
+              "remoteReqsDuration": "A String",
+              "shufflePushReadMetrics": {
+                "corruptMergedBlockChunks": "A String",
+                "localMergedBlocksFetched": "A String",
+                "localMergedBytesRead": "A String",
+                "localMergedChunksFetched": "A String",
+                "mergedFetchFallbackCount": "A String",
+                "remoteMergedBlocksFetched": "A String",
+                "remoteMergedBytesRead": "A String",
+                "remoteMergedChunksFetched": "A String",
+                "remoteMergedReqsDuration": "A String",
+              },
+            },
+            "shuffleWriteMetrics": { # Shuffle data written by task.
+              "bytesWritten": "A String",
+              "recordsWritten": "A String",
+              "writeTimeNanos": "A String",
+            },
+          },
+        },
+      },
+    },
+  ],
+}
+
+ +
+ searchStageAttempts_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ searchStages(name, pageSize=None, pageToken=None, parent=None, stageStatus=None, summaryMetricsMask=None, x__xgafv=None) +
Obtain data corresponding to stages for a Spark Application.
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  pageSize: integer, Optional. Maximum number of stages (paging based on stage_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.
+  pageToken: string, Optional. A page token received from a previous SearchSessionSparkApplicationStages call. Provide this token to retrieve the subsequent page.
+  parent: string, Required. Parent (Session) resource reference.
+  stageStatus: string, Optional. List only stages in the given state.
+    Allowed values
+      STAGE_STATUS_UNSPECIFIED - 
+      STAGE_STATUS_ACTIVE - 
+      STAGE_STATUS_COMPLETE - 
+      STAGE_STATUS_FAILED - 
+      STAGE_STATUS_PENDING - 
+      STAGE_STATUS_SKIPPED - 
+  summaryMetricsMask: string, Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A list of stages associated with a Spark Application.
+  "nextPageToken": "A String", # This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationStages.
+  "sparkApplicationStages": [ # Output only. Data corresponding to a stage.
+    { # Data corresponding to a stage.
+      "accumulatorUpdates": [
+        {
+          "accumullableInfoId": "A String",
+          "name": "A String",
+          "update": "A String",
+          "value": "A String",
+        },
+      ],
+      "completionTime": "A String",
+      "description": "A String",
+      "details": "A String",
+      "executorMetricsDistributions": {
+        "diskBytesSpilled": [
+          3.14,
+        ],
+        "failedTasks": [
+          3.14,
+        ],
+        "inputBytes": [
+          3.14,
+        ],
+        "inputRecords": [
+          3.14,
+        ],
+        "killedTasks": [
+          3.14,
+        ],
+        "memoryBytesSpilled": [
+          3.14,
+        ],
+        "outputBytes": [
+          3.14,
+        ],
+        "outputRecords": [
+          3.14,
+        ],
+        "peakMemoryMetrics": {
+          "executorMetrics": [
+            {
+              "metrics": {
+                "a_key": "A String",
+              },
+            },
+          ],
+          "quantiles": [
+            3.14,
+          ],
+        },
+        "quantiles": [
+          3.14,
+        ],
+        "shuffleRead": [
+          3.14,
+        ],
+        "shuffleReadRecords": [
+          3.14,
+        ],
+        "shuffleWrite": [
+          3.14,
+        ],
+        "shuffleWriteRecords": [
+          3.14,
+        ],
+        "succeededTasks": [
+          3.14,
+        ],
+        "taskTimeMillis": [
+          3.14,
+        ],
+      },
+      "executorSummary": {
+        "a_key": { # Executor resources consumed by a stage.
+          "diskBytesSpilled": "A String",
+          "executorId": "A String",
+          "failedTasks": 42,
+          "inputBytes": "A String",
+          "inputRecords": "A String",
+          "isExcludedForStage": True or False,
+          "killedTasks": 42,
+          "memoryBytesSpilled": "A String",
+          "outputBytes": "A String",
+          "outputRecords": "A String",
+          "peakMemoryMetrics": {
+            "metrics": {
+              "a_key": "A String",
+            },
+          },
+          "shuffleRead": "A String",
+          "shuffleReadRecords": "A String",
+          "shuffleWrite": "A String",
+          "shuffleWriteRecords": "A String",
+          "stageAttemptId": 42,
+          "stageId": "A String",
+          "succeededTasks": 42,
+          "taskTimeMillis": "A String",
+        },
+      },
+      "failureReason": "A String",
+      "firstTaskLaunchedTime": "A String",
+      "isShufflePushEnabled": True or False,
+      "jobIds": [
+        "A String",
+      ],
+      "killedTasksSummary": {
+        "a_key": 42,
+      },
+      "locality": {
+        "a_key": "A String",
+      },
+      "name": "A String",
+      "numActiveTasks": 42,
+      "numCompleteTasks": 42,
+      "numCompletedIndices": 42,
+      "numFailedTasks": 42,
+      "numKilledTasks": 42,
+      "numTasks": 42,
+      "parentStageIds": [
+        "A String",
+      ],
+      "peakExecutorMetrics": {
+        "metrics": {
+          "a_key": "A String",
+        },
+      },
+      "rddIds": [
+        "A String",
+      ],
+      "resourceProfileId": 42,
+      "schedulingPool": "A String",
+      "shuffleMergersCount": 42,
+      "speculationSummary": { # Details of the speculation task when speculative execution is enabled.
+        "numActiveTasks": 42,
+        "numCompletedTasks": 42,
+        "numFailedTasks": 42,
+        "numKilledTasks": 42,
+        "numTasks": 42,
+        "stageAttemptId": 42,
+        "stageId": "A String",
+      },
+      "stageAttemptId": 42,
+      "stageId": "A String",
+      "stageMetrics": { # Stage Level Aggregated Metrics
+        "diskBytesSpilled": "A String",
+        "executorCpuTimeNanos": "A String",
+        "executorDeserializeCpuTimeNanos": "A String",
+        "executorDeserializeTimeMillis": "A String",
+        "executorRunTimeMillis": "A String",
+        "jvmGcTimeMillis": "A String",
+        "memoryBytesSpilled": "A String",
+        "peakExecutionMemoryBytes": "A String",
+        "resultSerializationTimeMillis": "A String",
+        "resultSize": "A String",
+        "stageInputMetrics": { # Metrics about the input read by the stage.
+          "bytesRead": "A String",
+          "recordsRead": "A String",
+        },
+        "stageOutputMetrics": { # Metrics about the output written by the stage.
+          "bytesWritten": "A String",
+          "recordsWritten": "A String",
+        },
+        "stageShuffleReadMetrics": { # Shuffle data read for the stage.
+          "bytesRead": "A String",
+          "fetchWaitTimeMillis": "A String",
+          "localBlocksFetched": "A String",
+          "localBytesRead": "A String",
+          "recordsRead": "A String",
+          "remoteBlocksFetched": "A String",
+          "remoteBytesRead": "A String",
+          "remoteBytesReadToDisk": "A String",
+          "remoteReqsDuration": "A String",
+          "stageShufflePushReadMetrics": {
+            "corruptMergedBlockChunks": "A String",
+            "localMergedBlocksFetched": "A String",
+            "localMergedBytesRead": "A String",
+            "localMergedChunksFetched": "A String",
+            "mergedFetchFallbackCount": "A String",
+            "remoteMergedBlocksFetched": "A String",
+            "remoteMergedBytesRead": "A String",
+            "remoteMergedChunksFetched": "A String",
+            "remoteMergedReqsDuration": "A String",
+          },
+        },
+        "stageShuffleWriteMetrics": { # Shuffle data written for the stage.
+          "bytesWritten": "A String",
+          "recordsWritten": "A String",
+          "writeTimeNanos": "A String",
+        },
+      },
+      "status": "A String",
+      "submissionTime": "A String",
+      "taskQuantileMetrics": { # Summary metrics fields. These are included in response only if present in summary_metrics_mask field in request
+        "diskBytesSpilled": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "durationMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "executorCpuTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "executorDeserializeCpuTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "executorDeserializeTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "executorRunTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "gettingResultTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "inputMetrics": {
+          "bytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "recordsRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+        "jvmGcTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "memoryBytesSpilled": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "outputMetrics": {
+          "bytesWritten": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "recordsWritten": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+        "peakExecutionMemoryBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "resultSerializationTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "resultSize": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "schedulerDelayMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+          "count": "A String",
+          "maximum": "A String",
+          "minimum": "A String",
+          "percentile25": "A String",
+          "percentile50": "A String",
+          "percentile75": "A String",
+          "sum": "A String",
+        },
+        "shuffleReadMetrics": {
+          "fetchWaitTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "localBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "readBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "readRecords": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteBytesReadToDisk": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "remoteReqsDuration": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "shufflePushReadMetrics": {
+            "corruptMergedBlockChunks": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "localMergedBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "localMergedBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "localMergedChunksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "mergedFetchFallbackCount": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteMergedBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteMergedBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteMergedChunksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteMergedReqsDuration": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+          },
+          "totalBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+        "shuffleWriteMetrics": {
+          "writeBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "writeRecords": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "writeTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+        },
+      },
+      "tasks": {
+        "a_key": { # Data corresponding to tasks created by spark.
+          "accumulatorUpdates": [
+            {
+              "accumullableInfoId": "A String",
+              "name": "A String",
+              "update": "A String",
+              "value": "A String",
+            },
+          ],
+          "attempt": 42,
+          "durationMillis": "A String",
+          "errorMessage": "A String",
+          "executorId": "A String",
+          "executorLogs": {
+            "a_key": "A String",
+          },
+          "gettingResultTimeMillis": "A String",
+          "hasMetrics": True or False,
+          "host": "A String",
+          "index": 42,
+          "launchTime": "A String",
+          "partitionId": 42,
+          "resultFetchStart": "A String",
+          "schedulerDelayMillis": "A String",
+          "speculative": True or False,
+          "stageAttemptId": 42,
+          "stageId": "A String",
+          "status": "A String",
+          "taskId": "A String",
+          "taskLocality": "A String",
+          "taskMetrics": { # Executor Task Metrics
+            "diskBytesSpilled": "A String",
+            "executorCpuTimeNanos": "A String",
+            "executorDeserializeCpuTimeNanos": "A String",
+            "executorDeserializeTimeMillis": "A String",
+            "executorRunTimeMillis": "A String",
+            "inputMetrics": { # Metrics about the input data read by the task.
+              "bytesRead": "A String",
+              "recordsRead": "A String",
+            },
+            "jvmGcTimeMillis": "A String",
+            "memoryBytesSpilled": "A String",
+            "outputMetrics": { # Metrics about the data written by the task.
+              "bytesWritten": "A String",
+              "recordsWritten": "A String",
+            },
+            "peakExecutionMemoryBytes": "A String",
+            "resultSerializationTimeMillis": "A String",
+            "resultSize": "A String",
+            "shuffleReadMetrics": { # Shuffle data read by the task.
+              "fetchWaitTimeMillis": "A String",
+              "localBlocksFetched": "A String",
+              "localBytesRead": "A String",
+              "recordsRead": "A String",
+              "remoteBlocksFetched": "A String",
+              "remoteBytesRead": "A String",
+              "remoteBytesReadToDisk": "A String",
+              "remoteReqsDuration": "A String",
+              "shufflePushReadMetrics": {
+                "corruptMergedBlockChunks": "A String",
+                "localMergedBlocksFetched": "A String",
+                "localMergedBytesRead": "A String",
+                "localMergedChunksFetched": "A String",
+                "mergedFetchFallbackCount": "A String",
+                "remoteMergedBlocksFetched": "A String",
+                "remoteMergedBytesRead": "A String",
+                "remoteMergedChunksFetched": "A String",
+                "remoteMergedReqsDuration": "A String",
+              },
+            },
+            "shuffleWriteMetrics": { # Shuffle data written by task.
+              "bytesWritten": "A String",
+              "recordsWritten": "A String",
+              "writeTimeNanos": "A String",
+            },
+          },
+        },
+      },
+    },
+  ],
+}
+
+ +
+ searchStages_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ search_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ summarizeExecutors(name, parent=None, x__xgafv=None) +
Obtain summary of Executor Summary for a Spark Application
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  parent: string, Required. Parent (Session) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Consolidated summary of executors for a Spark Application.
+  "activeExecutorSummary": { # Consolidated summary about executors used by the application. # Consolidated summary for active executors.
+    "activeTasks": 42,
+    "completedTasks": 42,
+    "count": 42,
+    "diskUsed": "A String",
+    "failedTasks": 42,
+    "isExcluded": 42,
+    "maxMemory": "A String",
+    "memoryMetrics": {
+      "totalOffHeapStorageMemory": "A String",
+      "totalOnHeapStorageMemory": "A String",
+      "usedOffHeapStorageMemory": "A String",
+      "usedOnHeapStorageMemory": "A String",
+    },
+    "memoryUsed": "A String",
+    "rddBlocks": 42,
+    "totalCores": 42,
+    "totalDurationMillis": "A String",
+    "totalGcTimeMillis": "A String",
+    "totalInputBytes": "A String",
+    "totalShuffleRead": "A String",
+    "totalShuffleWrite": "A String",
+    "totalTasks": 42,
+  },
+  "applicationId": "A String", # Spark Application Id
+  "deadExecutorSummary": { # Consolidated summary about executors used by the application. # Consolidated summary for dead executors.
+    "activeTasks": 42,
+    "completedTasks": 42,
+    "count": 42,
+    "diskUsed": "A String",
+    "failedTasks": 42,
+    "isExcluded": 42,
+    "maxMemory": "A String",
+    "memoryMetrics": {
+      "totalOffHeapStorageMemory": "A String",
+      "totalOnHeapStorageMemory": "A String",
+      "usedOffHeapStorageMemory": "A String",
+      "usedOnHeapStorageMemory": "A String",
+    },
+    "memoryUsed": "A String",
+    "rddBlocks": 42,
+    "totalCores": 42,
+    "totalDurationMillis": "A String",
+    "totalGcTimeMillis": "A String",
+    "totalInputBytes": "A String",
+    "totalShuffleRead": "A String",
+    "totalShuffleWrite": "A String",
+    "totalTasks": 42,
+  },
+  "totalExecutorSummary": { # Consolidated summary about executors used by the application. # Overall consolidated summary for all executors.
+    "activeTasks": 42,
+    "completedTasks": 42,
+    "count": 42,
+    "diskUsed": "A String",
+    "failedTasks": 42,
+    "isExcluded": 42,
+    "maxMemory": "A String",
+    "memoryMetrics": {
+      "totalOffHeapStorageMemory": "A String",
+      "totalOnHeapStorageMemory": "A String",
+      "usedOffHeapStorageMemory": "A String",
+      "usedOnHeapStorageMemory": "A String",
+    },
+    "memoryUsed": "A String",
+    "rddBlocks": 42,
+    "totalCores": 42,
+    "totalDurationMillis": "A String",
+    "totalGcTimeMillis": "A String",
+    "totalInputBytes": "A String",
+    "totalShuffleRead": "A String",
+    "totalShuffleWrite": "A String",
+    "totalTasks": 42,
+  },
+}
+
+ +
+ summarizeJobs(name, parent=None, x__xgafv=None) +
Obtain summary of Jobs for a Spark Application
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  parent: string, Required. Parent (Session) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Summary of a Spark Application jobs.
+  "jobsSummary": { # Data related to Jobs page summary # Summary of a Spark Application Jobs
+    "activeJobs": 42, # Number of active jobs
+    "applicationId": "A String", # Spark Application Id
+    "attempts": [ # Attempts info
+      { # Specific attempt of an application.
+        "appSparkVersion": "A String",
+        "attemptId": "A String",
+        "completed": True or False,
+        "durationMillis": "A String",
+        "endTime": "A String",
+        "lastUpdated": "A String",
+        "sparkUser": "A String",
+        "startTime": "A String",
+      },
+    ],
+    "completedJobs": 42, # Number of completed jobs
+    "failedJobs": 42, # Number of failed jobs
+    "schedulingMode": "A String", # Spark Scheduling mode
+  },
+}
+
+ +
+ summarizeStageAttemptTasks(name, parent=None, stageAttemptId=None, stageId=None, x__xgafv=None) +
Obtain summary of Tasks for a Spark Application Stage Attempt
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  parent: string, Required. Parent (Session) resource reference.
+  stageAttemptId: integer, Required. Stage Attempt ID
+  stageId: string, Required. Stage ID
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Summary of tasks for a Spark Application stage attempt.
+  "stageAttemptTasksSummary": { # Data related to tasks summary for a Spark Stage Attempt # Summary of tasks for a Spark Application Stage Attempt
+    "applicationId": "A String",
+    "numFailedTasks": 42,
+    "numKilledTasks": 42,
+    "numPendingTasks": 42,
+    "numRunningTasks": 42,
+    "numSuccessTasks": 42,
+    "numTasks": 42,
+    "stageAttemptId": 42,
+    "stageId": "A String",
+  },
+}
+
+ +
+ summarizeStages(name, parent=None, x__xgafv=None) +
Obtain summary of Stages for a Spark Application
+
+Args:
+  name: string, Required. The fully qualified name of the session to retrieve in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  parent: string, Required. Parent (Session) resource reference.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Summary of a Spark Application stages.
+  "stagesSummary": { # Data related to Stages page summary # Summary of a Spark Application Stages
+    "applicationId": "A String",
+    "numActiveStages": 42,
+    "numCompletedStages": 42,
+    "numFailedStages": 42,
+    "numPendingStages": 42,
+    "numSkippedStages": 42,
+  },
+}
+
+ +
+ write(name, body=None, x__xgafv=None) +
Write wrapper objects from dataplane to spanner
+
+Args:
+  name: string, Required. The fully qualified name of the spark application to write data about in the format "projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID" (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Write Spark Application data to internal storage systems
+  "parent": "A String", # Required. Parent (Batch) resource reference.
+  "sparkWrapperObjects": [ # Required. The batch of spark application context objects sent for ingestion.
+    { # Outer message that contains the data obtained from spark listener, packaged with information that is required to process it.
+      "appSummary": {
+        "numCompletedJobs": 42,
+        "numCompletedStages": 42,
+      },
+      "applicationEnvironmentInfo": { # Details about the Environment that the application is running in.
+        "classpathEntries": {
+          "a_key": "A String",
+        },
+        "hadoopProperties": {
+          "a_key": "A String",
+        },
+        "metricsProperties": {
+          "a_key": "A String",
+        },
+        "resourceProfiles": [
+          { # Resource profile that contains information about all the resources required by executors and tasks.
+            "executorResources": {
+              "a_key": { # Resources used per executor used by the application.
+                "amount": "A String",
+                "discoveryScript": "A String",
+                "resourceName": "A String",
+                "vendor": "A String",
+              },
+            },
+            "resourceProfileId": 42,
+            "taskResources": {
+              "a_key": { # Resources used per task created by the application.
+                "amount": 3.14,
+                "resourceName": "A String",
+              },
+            },
+          },
+        ],
+        "runtime": {
+          "javaHome": "A String",
+          "javaVersion": "A String",
+          "scalaVersion": "A String",
+        },
+        "sparkProperties": {
+          "a_key": "A String",
+        },
+        "systemProperties": {
+          "a_key": "A String",
+        },
+      },
+      "applicationId": "A String", # Application Id created by Spark.
+      "applicationInfo": { # High level information corresponding to an application.
+        "applicationContextIngestionStatus": "A String",
+        "applicationId": "A String",
+        "attempts": [
+          { # Specific attempt of an application.
+            "appSparkVersion": "A String",
+            "attemptId": "A String",
+            "completed": True or False,
+            "durationMillis": "A String",
+            "endTime": "A String",
+            "lastUpdated": "A String",
+            "sparkUser": "A String",
+            "startTime": "A String",
+          },
+        ],
+        "coresGranted": 42,
+        "coresPerExecutor": 42,
+        "maxCores": 42,
+        "memoryPerExecutorMb": 42,
+        "name": "A String",
+        "quantileDataStatus": "A String",
+      },
+      "eventTimestamp": "A String", # VM Timestamp associated with the data object.
+      "executorStageSummary": { # Executor resources consumed by a stage.
+        "diskBytesSpilled": "A String",
+        "executorId": "A String",
+        "failedTasks": 42,
+        "inputBytes": "A String",
+        "inputRecords": "A String",
+        "isExcludedForStage": True or False,
+        "killedTasks": 42,
+        "memoryBytesSpilled": "A String",
+        "outputBytes": "A String",
+        "outputRecords": "A String",
+        "peakMemoryMetrics": {
+          "metrics": {
+            "a_key": "A String",
+          },
+        },
+        "shuffleRead": "A String",
+        "shuffleReadRecords": "A String",
+        "shuffleWrite": "A String",
+        "shuffleWriteRecords": "A String",
+        "stageAttemptId": 42,
+        "stageId": "A String",
+        "succeededTasks": 42,
+        "taskTimeMillis": "A String",
+      },
+      "executorSummary": { # Details about executors used by the application.
+        "activeTasks": 42,
+        "addTime": "A String",
+        "attributes": {
+          "a_key": "A String",
+        },
+        "completedTasks": 42,
+        "diskUsed": "A String",
+        "excludedInStages": [
+          "A String",
+        ],
+        "executorId": "A String",
+        "executorLogs": {
+          "a_key": "A String",
+        },
+        "failedTasks": 42,
+        "hostPort": "A String",
+        "isActive": True or False,
+        "isExcluded": True or False,
+        "maxMemory": "A String",
+        "maxTasks": 42,
+        "memoryMetrics": {
+          "totalOffHeapStorageMemory": "A String",
+          "totalOnHeapStorageMemory": "A String",
+          "usedOffHeapStorageMemory": "A String",
+          "usedOnHeapStorageMemory": "A String",
+        },
+        "memoryUsed": "A String",
+        "peakMemoryMetrics": {
+          "metrics": {
+            "a_key": "A String",
+          },
+        },
+        "rddBlocks": 42,
+        "removeReason": "A String",
+        "removeTime": "A String",
+        "resourceProfileId": 42,
+        "resources": {
+          "a_key": {
+            "addresses": [
+              "A String",
+            ],
+            "name": "A String",
+          },
+        },
+        "totalCores": 42,
+        "totalDurationMillis": "A String",
+        "totalGcTimeMillis": "A String",
+        "totalInputBytes": "A String",
+        "totalShuffleRead": "A String",
+        "totalShuffleWrite": "A String",
+        "totalTasks": 42,
+      },
+      "jobData": { # Data corresponding to a spark job.
+        "completionTime": "A String",
+        "description": "A String",
+        "jobGroup": "A String",
+        "jobId": "A String",
+        "killTasksSummary": {
+          "a_key": 42,
+        },
+        "name": "A String",
+        "numActiveStages": 42,
+        "numActiveTasks": 42,
+        "numCompletedIndices": 42,
+        "numCompletedStages": 42,
+        "numCompletedTasks": 42,
+        "numFailedStages": 42,
+        "numFailedTasks": 42,
+        "numKilledTasks": 42,
+        "numSkippedStages": 42,
+        "numSkippedTasks": 42,
+        "numTasks": 42,
+        "skippedStages": [
+          42,
+        ],
+        "sqlExecutionId": "A String",
+        "stageIds": [
+          "A String",
+        ],
+        "status": "A String",
+        "submissionTime": "A String",
+      },
+      "poolData": { # Pool Data
+        "name": "A String",
+        "stageIds": [
+          "A String",
+        ],
+      },
+      "processSummary": { # Process Summary
+        "addTime": "A String",
+        "hostPort": "A String",
+        "isActive": True or False,
+        "processId": "A String",
+        "processLogs": {
+          "a_key": "A String",
+        },
+        "removeTime": "A String",
+        "totalCores": 42,
+      },
+      "rddOperationGraph": { # Graph representing RDD dependencies. Consists of edges and a root cluster.
+        "edges": [
+          { # A directed edge representing dependency between two RDDs.
+            "fromId": 42,
+            "toId": 42,
+          },
+        ],
+        "incomingEdges": [
+          { # A directed edge representing dependency between two RDDs.
+            "fromId": 42,
+            "toId": 42,
+          },
+        ],
+        "outgoingEdges": [
+          { # A directed edge representing dependency between two RDDs.
+            "fromId": 42,
+            "toId": 42,
+          },
+        ],
+        "rootCluster": { # A grouping of nodes representing higher level constructs (stage, job etc.).
+          "childClusters": [
+            # Object with schema name: RddOperationCluster
+          ],
+          "childNodes": [
+            { # A node in the RDD operation graph. Corresponds to a single RDD.
+              "barrier": True or False,
+              "cached": True or False,
+              "callsite": "A String",
+              "name": "A String",
+              "nodeId": 42,
+              "outputDeterministicLevel": "A String",
+            },
+          ],
+          "name": "A String",
+          "rddClusterId": "A String",
+        },
+        "stageId": "A String",
+      },
+      "rddStorageInfo": { # Overall data about RDD storage.
+        "dataDistribution": [
+          { # Details about RDD usage.
+            "address": "A String",
+            "diskUsed": "A String",
+            "memoryRemaining": "A String",
+            "memoryUsed": "A String",
+            "offHeapMemoryRemaining": "A String",
+            "offHeapMemoryUsed": "A String",
+            "onHeapMemoryRemaining": "A String",
+            "onHeapMemoryUsed": "A String",
+          },
+        ],
+        "diskUsed": "A String",
+        "memoryUsed": "A String",
+        "name": "A String",
+        "numCachedPartitions": 42,
+        "numPartitions": 42,
+        "partitions": [
+          { # Information about RDD partitions.
+            "blockName": "A String",
+            "diskUsed": "A String",
+            "executors": [
+              "A String",
+            ],
+            "memoryUsed": "A String",
+            "storageLevel": "A String",
+          },
+        ],
+        "rddStorageId": 42,
+        "storageLevel": "A String",
+      },
+      "resourceProfileInfo": { # Resource profile that contains information about all the resources required by executors and tasks.
+        "executorResources": {
+          "a_key": { # Resources used per executor used by the application.
+            "amount": "A String",
+            "discoveryScript": "A String",
+            "resourceName": "A String",
+            "vendor": "A String",
+          },
+        },
+        "resourceProfileId": 42,
+        "taskResources": {
+          "a_key": { # Resources used per task created by the application.
+            "amount": 3.14,
+            "resourceName": "A String",
+          },
+        },
+      },
+      "sparkPlanGraph": { # A graph used for storing information of an executionPlan of DataFrame.
+        "edges": [
+          { # Represents a directed edge in the spark plan tree from child to parent.
+            "fromId": "A String",
+            "toId": "A String",
+          },
+        ],
+        "executionId": "A String",
+        "nodes": [
+          { # Wrapper user to represent either a node or a cluster.
+            "cluster": { # Represents a tree of spark plan.
+              "desc": "A String",
+              "metrics": [
+                { # Metrics related to SQL execution.
+                  "accumulatorId": "A String",
+                  "metricType": "A String",
+                  "name": "A String",
+                },
+              ],
+              "name": "A String",
+              "nodes": [
+                # Object with schema name: SparkPlanGraphNodeWrapper
+              ],
+              "sparkPlanGraphClusterId": "A String",
+            },
+            "node": { # Represents a node in the spark plan tree.
+              "desc": "A String",
+              "metrics": [
+                { # Metrics related to SQL execution.
+                  "accumulatorId": "A String",
+                  "metricType": "A String",
+                  "name": "A String",
+                },
+              ],
+              "name": "A String",
+              "sparkPlanGraphNodeId": "A String",
+            },
+          },
+        ],
+      },
+      "speculationStageSummary": { # Details of the speculation task when speculative execution is enabled.
+        "numActiveTasks": 42,
+        "numCompletedTasks": 42,
+        "numFailedTasks": 42,
+        "numKilledTasks": 42,
+        "numTasks": 42,
+        "stageAttemptId": 42,
+        "stageId": "A String",
+      },
+      "sqlExecutionUiData": { # SQL Execution Data
+        "completionTime": "A String",
+        "description": "A String",
+        "details": "A String",
+        "errorMessage": "A String",
+        "executionId": "A String",
+        "jobs": {
+          "a_key": "A String",
+        },
+        "metricValues": {
+          "a_key": "A String",
+        },
+        "metricValuesIsNull": True or False,
+        "metrics": [
+          { # Metrics related to SQL execution.
+            "accumulatorId": "A String",
+            "metricType": "A String",
+            "name": "A String",
+          },
+        ],
+        "modifiedConfigs": {
+          "a_key": "A String",
+        },
+        "physicalPlanDescription": "A String",
+        "rootExecutionId": "A String",
+        "stages": [
+          "A String",
+        ],
+        "submissionTime": "A String",
+      },
+      "stageData": { # Data corresponding to a stage.
+        "accumulatorUpdates": [
+          {
+            "accumullableInfoId": "A String",
+            "name": "A String",
+            "update": "A String",
+            "value": "A String",
+          },
+        ],
+        "completionTime": "A String",
+        "description": "A String",
+        "details": "A String",
+        "executorMetricsDistributions": {
+          "diskBytesSpilled": [
+            3.14,
+          ],
+          "failedTasks": [
+            3.14,
+          ],
+          "inputBytes": [
+            3.14,
+          ],
+          "inputRecords": [
+            3.14,
+          ],
+          "killedTasks": [
+            3.14,
+          ],
+          "memoryBytesSpilled": [
+            3.14,
+          ],
+          "outputBytes": [
+            3.14,
+          ],
+          "outputRecords": [
+            3.14,
+          ],
+          "peakMemoryMetrics": {
+            "executorMetrics": [
+              {
+                "metrics": {
+                  "a_key": "A String",
+                },
+              },
+            ],
+            "quantiles": [
+              3.14,
+            ],
+          },
+          "quantiles": [
+            3.14,
+          ],
+          "shuffleRead": [
+            3.14,
+          ],
+          "shuffleReadRecords": [
+            3.14,
+          ],
+          "shuffleWrite": [
+            3.14,
+          ],
+          "shuffleWriteRecords": [
+            3.14,
+          ],
+          "succeededTasks": [
+            3.14,
+          ],
+          "taskTimeMillis": [
+            3.14,
+          ],
+        },
+        "executorSummary": {
+          "a_key": { # Executor resources consumed by a stage.
+            "diskBytesSpilled": "A String",
+            "executorId": "A String",
+            "failedTasks": 42,
+            "inputBytes": "A String",
+            "inputRecords": "A String",
+            "isExcludedForStage": True or False,
+            "killedTasks": 42,
+            "memoryBytesSpilled": "A String",
+            "outputBytes": "A String",
+            "outputRecords": "A String",
+            "peakMemoryMetrics": {
+              "metrics": {
+                "a_key": "A String",
+              },
+            },
+            "shuffleRead": "A String",
+            "shuffleReadRecords": "A String",
+            "shuffleWrite": "A String",
+            "shuffleWriteRecords": "A String",
+            "stageAttemptId": 42,
+            "stageId": "A String",
+            "succeededTasks": 42,
+            "taskTimeMillis": "A String",
+          },
+        },
+        "failureReason": "A String",
+        "firstTaskLaunchedTime": "A String",
+        "isShufflePushEnabled": True or False,
+        "jobIds": [
+          "A String",
+        ],
+        "killedTasksSummary": {
+          "a_key": 42,
+        },
+        "locality": {
+          "a_key": "A String",
+        },
+        "name": "A String",
+        "numActiveTasks": 42,
+        "numCompleteTasks": 42,
+        "numCompletedIndices": 42,
+        "numFailedTasks": 42,
+        "numKilledTasks": 42,
+        "numTasks": 42,
+        "parentStageIds": [
+          "A String",
+        ],
+        "peakExecutorMetrics": {
+          "metrics": {
+            "a_key": "A String",
+          },
+        },
+        "rddIds": [
+          "A String",
+        ],
+        "resourceProfileId": 42,
+        "schedulingPool": "A String",
+        "shuffleMergersCount": 42,
+        "speculationSummary": { # Details of the speculation task when speculative execution is enabled.
+          "numActiveTasks": 42,
+          "numCompletedTasks": 42,
+          "numFailedTasks": 42,
+          "numKilledTasks": 42,
+          "numTasks": 42,
+          "stageAttemptId": 42,
+          "stageId": "A String",
+        },
+        "stageAttemptId": 42,
+        "stageId": "A String",
+        "stageMetrics": { # Stage Level Aggregated Metrics
+          "diskBytesSpilled": "A String",
+          "executorCpuTimeNanos": "A String",
+          "executorDeserializeCpuTimeNanos": "A String",
+          "executorDeserializeTimeMillis": "A String",
+          "executorRunTimeMillis": "A String",
+          "jvmGcTimeMillis": "A String",
+          "memoryBytesSpilled": "A String",
+          "peakExecutionMemoryBytes": "A String",
+          "resultSerializationTimeMillis": "A String",
+          "resultSize": "A String",
+          "stageInputMetrics": { # Metrics about the input read by the stage.
+            "bytesRead": "A String",
+            "recordsRead": "A String",
+          },
+          "stageOutputMetrics": { # Metrics about the output written by the stage.
+            "bytesWritten": "A String",
+            "recordsWritten": "A String",
+          },
+          "stageShuffleReadMetrics": { # Shuffle data read for the stage.
+            "bytesRead": "A String",
+            "fetchWaitTimeMillis": "A String",
+            "localBlocksFetched": "A String",
+            "localBytesRead": "A String",
+            "recordsRead": "A String",
+            "remoteBlocksFetched": "A String",
+            "remoteBytesRead": "A String",
+            "remoteBytesReadToDisk": "A String",
+            "remoteReqsDuration": "A String",
+            "stageShufflePushReadMetrics": {
+              "corruptMergedBlockChunks": "A String",
+              "localMergedBlocksFetched": "A String",
+              "localMergedBytesRead": "A String",
+              "localMergedChunksFetched": "A String",
+              "mergedFetchFallbackCount": "A String",
+              "remoteMergedBlocksFetched": "A String",
+              "remoteMergedBytesRead": "A String",
+              "remoteMergedChunksFetched": "A String",
+              "remoteMergedReqsDuration": "A String",
+            },
+          },
+          "stageShuffleWriteMetrics": { # Shuffle data written for the stage.
+            "bytesWritten": "A String",
+            "recordsWritten": "A String",
+            "writeTimeNanos": "A String",
+          },
+        },
+        "status": "A String",
+        "submissionTime": "A String",
+        "taskQuantileMetrics": { # Summary metrics fields. These are included in response only if present in summary_metrics_mask field in request
+          "diskBytesSpilled": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "durationMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "executorCpuTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "executorDeserializeCpuTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "executorDeserializeTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "executorRunTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "gettingResultTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "inputMetrics": {
+            "bytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "recordsRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+          },
+          "jvmGcTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "memoryBytesSpilled": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "outputMetrics": {
+            "bytesWritten": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "recordsWritten": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+          },
+          "peakExecutionMemoryBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "resultSerializationTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "resultSize": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "schedulerDelayMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+            "count": "A String",
+            "maximum": "A String",
+            "minimum": "A String",
+            "percentile25": "A String",
+            "percentile50": "A String",
+            "percentile75": "A String",
+            "sum": "A String",
+          },
+          "shuffleReadMetrics": {
+            "fetchWaitTimeMillis": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "localBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "readBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "readRecords": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteBytesReadToDisk": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "remoteReqsDuration": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "shufflePushReadMetrics": {
+              "corruptMergedBlockChunks": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+              "localMergedBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+              "localMergedBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+              "localMergedChunksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+              "mergedFetchFallbackCount": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+              "remoteMergedBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+              "remoteMergedBytesRead": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+              "remoteMergedChunksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+              "remoteMergedReqsDuration": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+                "count": "A String",
+                "maximum": "A String",
+                "minimum": "A String",
+                "percentile25": "A String",
+                "percentile50": "A String",
+                "percentile75": "A String",
+                "sum": "A String",
+              },
+            },
+            "totalBlocksFetched": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+          },
+          "shuffleWriteMetrics": {
+            "writeBytes": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "writeRecords": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+            "writeTimeNanos": { # Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.
+              "count": "A String",
+              "maximum": "A String",
+              "minimum": "A String",
+              "percentile25": "A String",
+              "percentile50": "A String",
+              "percentile75": "A String",
+              "sum": "A String",
+            },
+          },
+        },
+        "tasks": {
+          "a_key": { # Data corresponding to tasks created by spark.
+            "accumulatorUpdates": [
+              {
+                "accumullableInfoId": "A String",
+                "name": "A String",
+                "update": "A String",
+                "value": "A String",
+              },
+            ],
+            "attempt": 42,
+            "durationMillis": "A String",
+            "errorMessage": "A String",
+            "executorId": "A String",
+            "executorLogs": {
+              "a_key": "A String",
+            },
+            "gettingResultTimeMillis": "A String",
+            "hasMetrics": True or False,
+            "host": "A String",
+            "index": 42,
+            "launchTime": "A String",
+            "partitionId": 42,
+            "resultFetchStart": "A String",
+            "schedulerDelayMillis": "A String",
+            "speculative": True or False,
+            "stageAttemptId": 42,
+            "stageId": "A String",
+            "status": "A String",
+            "taskId": "A String",
+            "taskLocality": "A String",
+            "taskMetrics": { # Executor Task Metrics
+              "diskBytesSpilled": "A String",
+              "executorCpuTimeNanos": "A String",
+              "executorDeserializeCpuTimeNanos": "A String",
+              "executorDeserializeTimeMillis": "A String",
+              "executorRunTimeMillis": "A String",
+              "inputMetrics": { # Metrics about the input data read by the task.
+                "bytesRead": "A String",
+                "recordsRead": "A String",
+              },
+              "jvmGcTimeMillis": "A String",
+              "memoryBytesSpilled": "A String",
+              "outputMetrics": { # Metrics about the data written by the task.
+                "bytesWritten": "A String",
+                "recordsWritten": "A String",
+              },
+              "peakExecutionMemoryBytes": "A String",
+              "resultSerializationTimeMillis": "A String",
+              "resultSize": "A String",
+              "shuffleReadMetrics": { # Shuffle data read by the task.
+                "fetchWaitTimeMillis": "A String",
+                "localBlocksFetched": "A String",
+                "localBytesRead": "A String",
+                "recordsRead": "A String",
+                "remoteBlocksFetched": "A String",
+                "remoteBytesRead": "A String",
+                "remoteBytesReadToDisk": "A String",
+                "remoteReqsDuration": "A String",
+                "shufflePushReadMetrics": {
+                  "corruptMergedBlockChunks": "A String",
+                  "localMergedBlocksFetched": "A String",
+                  "localMergedBytesRead": "A String",
+                  "localMergedChunksFetched": "A String",
+                  "mergedFetchFallbackCount": "A String",
+                  "remoteMergedBlocksFetched": "A String",
+                  "remoteMergedBytesRead": "A String",
+                  "remoteMergedChunksFetched": "A String",
+                  "remoteMergedReqsDuration": "A String",
+                },
+              },
+              "shuffleWriteMetrics": { # Shuffle data written by task.
+                "bytesWritten": "A String",
+                "recordsWritten": "A String",
+                "writeTimeNanos": "A String",
+              },
+            },
+          },
+        },
+      },
+      "streamBlockData": { # Stream Block Data.
+        "deserialized": True or False,
+        "diskSize": "A String",
+        "executorId": "A String",
+        "hostPort": "A String",
+        "memSize": "A String",
+        "name": "A String",
+        "storageLevel": "A String",
+        "useDisk": True or False,
+        "useMemory": True or False,
+      },
+      "streamingQueryData": { # Streaming
+        "endTimestamp": "A String",
+        "exception": "A String",
+        "isActive": True or False,
+        "name": "A String",
+        "runId": "A String",
+        "startTimestamp": "A String",
+        "streamingQueryId": "A String",
+      },
+      "streamingQueryProgress": {
+        "batchDuration": "A String",
+        "batchId": "A String",
+        "durationMillis": {
+          "a_key": "A String",
+        },
+        "eventTime": {
+          "a_key": "A String",
+        },
+        "name": "A String",
+        "observedMetrics": {
+          "a_key": "A String",
+        },
+        "runId": "A String",
+        "sink": {
+          "description": "A String",
+          "metrics": {
+            "a_key": "A String",
+          },
+          "numOutputRows": "A String",
+        },
+        "sources": [
+          {
+            "description": "A String",
+            "endOffset": "A String",
+            "inputRowsPerSecond": 3.14,
+            "latestOffset": "A String",
+            "metrics": {
+              "a_key": "A String",
+            },
+            "numInputRows": "A String",
+            "processedRowsPerSecond": 3.14,
+            "startOffset": "A String",
+          },
+        ],
+        "stateOperators": [
+          {
+            "allRemovalsTimeMs": "A String",
+            "allUpdatesTimeMs": "A String",
+            "commitTimeMs": "A String",
+            "customMetrics": {
+              "a_key": "A String",
+            },
+            "memoryUsedBytes": "A String",
+            "numRowsDroppedByWatermark": "A String",
+            "numRowsRemoved": "A String",
+            "numRowsTotal": "A String",
+            "numRowsUpdated": "A String",
+            "numShufflePartitions": "A String",
+            "numStateStoreInstances": "A String",
+            "operatorName": "A String",
+          },
+        ],
+        "streamingQueryProgressId": "A String",
+        "timestamp": "A String",
+      },
+      "taskData": { # Data corresponding to tasks created by spark.
+        "accumulatorUpdates": [
+          {
+            "accumullableInfoId": "A String",
+            "name": "A String",
+            "update": "A String",
+            "value": "A String",
+          },
+        ],
+        "attempt": 42,
+        "durationMillis": "A String",
+        "errorMessage": "A String",
+        "executorId": "A String",
+        "executorLogs": {
+          "a_key": "A String",
+        },
+        "gettingResultTimeMillis": "A String",
+        "hasMetrics": True or False,
+        "host": "A String",
+        "index": 42,
+        "launchTime": "A String",
+        "partitionId": 42,
+        "resultFetchStart": "A String",
+        "schedulerDelayMillis": "A String",
+        "speculative": True or False,
+        "stageAttemptId": 42,
+        "stageId": "A String",
+        "status": "A String",
+        "taskId": "A String",
+        "taskLocality": "A String",
+        "taskMetrics": { # Executor Task Metrics
+          "diskBytesSpilled": "A String",
+          "executorCpuTimeNanos": "A String",
+          "executorDeserializeCpuTimeNanos": "A String",
+          "executorDeserializeTimeMillis": "A String",
+          "executorRunTimeMillis": "A String",
+          "inputMetrics": { # Metrics about the input data read by the task.
+            "bytesRead": "A String",
+            "recordsRead": "A String",
+          },
+          "jvmGcTimeMillis": "A String",
+          "memoryBytesSpilled": "A String",
+          "outputMetrics": { # Metrics about the data written by the task.
+            "bytesWritten": "A String",
+            "recordsWritten": "A String",
+          },
+          "peakExecutionMemoryBytes": "A String",
+          "resultSerializationTimeMillis": "A String",
+          "resultSize": "A String",
+          "shuffleReadMetrics": { # Shuffle data read by the task.
+            "fetchWaitTimeMillis": "A String",
+            "localBlocksFetched": "A String",
+            "localBytesRead": "A String",
+            "recordsRead": "A String",
+            "remoteBlocksFetched": "A String",
+            "remoteBytesRead": "A String",
+            "remoteBytesReadToDisk": "A String",
+            "remoteReqsDuration": "A String",
+            "shufflePushReadMetrics": {
+              "corruptMergedBlockChunks": "A String",
+              "localMergedBlocksFetched": "A String",
+              "localMergedBytesRead": "A String",
+              "localMergedChunksFetched": "A String",
+              "mergedFetchFallbackCount": "A String",
+              "remoteMergedBlocksFetched": "A String",
+              "remoteMergedBytesRead": "A String",
+              "remoteMergedChunksFetched": "A String",
+              "remoteMergedReqsDuration": "A String",
+            },
+          },
+          "shuffleWriteMetrics": { # Shuffle data written by task.
+            "bytesWritten": "A String",
+            "recordsWritten": "A String",
+            "writeTimeNanos": "A String",
+          },
+        },
+      },
+    },
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response returned as an acknowledgement of receipt of data.
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html b/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html index 20fc9336aa6..e35839107f8 100644 --- a/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html +++ b/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html @@ -148,7 +148,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -406,7 +406,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -441,6 +441,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -613,6 +617,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -674,6 +682,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -766,6 +778,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -834,7 +850,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -1092,7 +1108,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -1127,6 +1143,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -1299,6 +1319,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -1360,6 +1384,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -1452,6 +1480,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -1547,7 +1579,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -1805,7 +1837,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -1840,6 +1872,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2012,6 +2048,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2073,6 +2113,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2165,6 +2209,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2324,7 +2372,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -2582,7 +2630,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -2617,6 +2665,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2789,6 +2841,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2850,6 +2906,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2942,6 +3002,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -3051,7 +3115,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -3309,7 +3373,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -3344,6 +3408,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -3516,6 +3584,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -3577,6 +3649,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -3669,6 +3745,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -3846,7 +3926,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -4104,7 +4184,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -4139,6 +4219,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -4311,6 +4395,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -4372,6 +4460,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -4464,6 +4556,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -4532,7 +4628,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -4790,7 +4886,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -4825,6 +4921,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -4997,6 +5097,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -5058,6 +5162,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -5150,6 +5258,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", diff --git a/docs/dyn/dataproc_v1.projects.regions.clusters.html b/docs/dyn/dataproc_v1.projects.regions.clusters.html index 5c2ddc5b7d1..32dab20fd3f 100644 --- a/docs/dyn/dataproc_v1.projects.regions.clusters.html +++ b/docs/dyn/dataproc_v1.projects.regions.clusters.html @@ -150,7 +150,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -185,6 +185,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -357,6 +361,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -418,6 +426,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -510,6 +522,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -785,7 +801,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -820,6 +836,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -992,6 +1012,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -1053,6 +1077,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -1145,6 +1173,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -1381,7 +1413,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -1416,6 +1448,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -1588,6 +1624,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -1649,6 +1689,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -1741,6 +1785,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -1897,7 +1945,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -1932,6 +1980,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2104,6 +2156,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2165,6 +2221,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2257,6 +2317,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2418,6 +2482,9 @@

Method Details

The object takes the form of: { # A request to repair a cluster. + "cluster": { # Cluster to be repaired # Optional. Cluster to be repaired + "clusterRepairAction": "A String", # Required. Repair action to take on the cluster resource. + }, "clusterUuid": "A String", # Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist. "gracefulDecommissionTimeout": "A String", # Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning facilitates the removal of cluster nodes without interrupting jobs in progress. The timeout specifies the amount of time to wait for jobs finish before forcefully removing nodes. The default timeout is 0 for forceful decommissioning, and the maximum timeout period is 1 day. (see JSON Mapping—Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).graceful_decommission_timeout is supported in Dataproc image versions 1.2+. "nodePools": [ # Optional. Node pools and corresponding repair action to be taken. All node pools should be unique in this request. i.e. Multiple entries for the same node pool id are not allowed. diff --git a/docs/dyn/dataproc_v1.projects.regions.clusters.nodeGroups.html b/docs/dyn/dataproc_v1.projects.regions.clusters.nodeGroups.html index 97990b48fb4..f33cfedfd64 100644 --- a/docs/dyn/dataproc_v1.projects.regions.clusters.nodeGroups.html +++ b/docs/dyn/dataproc_v1.projects.regions.clusters.nodeGroups.html @@ -105,7 +105,7 @@

Method Details

The object takes the form of: { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -140,6 +140,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -219,7 +223,7 @@

Method Details

An object of the form: { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -254,6 +258,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", diff --git a/docs/dyn/dataproc_v1.projects.regions.jobs.html b/docs/dyn/dataproc_v1.projects.regions.jobs.html index b2d58752de6..c646c05ff26 100644 --- a/docs/dyn/dataproc_v1.projects.regions.jobs.html +++ b/docs/dyn/dataproc_v1.projects.regions.jobs.html @@ -155,7 +155,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -466,7 +466,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -805,7 +805,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -1106,7 +1106,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -1384,7 +1384,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -1723,7 +1723,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -2002,7 +2002,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -2283,7 +2283,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. diff --git a/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html b/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html index 13fbfe170df..938a5652027 100644 --- a/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html +++ b/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html @@ -148,7 +148,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -406,7 +406,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -441,6 +441,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -613,6 +617,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -674,6 +682,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -766,6 +778,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -834,7 +850,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -1092,7 +1108,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -1127,6 +1143,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -1299,6 +1319,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -1360,6 +1384,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -1452,6 +1480,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -1547,7 +1579,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -1805,7 +1837,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -1840,6 +1872,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2012,6 +2048,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2073,6 +2113,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2165,6 +2209,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2324,7 +2372,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -2582,7 +2630,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -2617,6 +2665,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2789,6 +2841,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2850,6 +2906,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -2942,6 +3002,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -3051,7 +3115,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -3309,7 +3373,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -3344,6 +3408,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -3516,6 +3584,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -3577,6 +3649,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -3669,6 +3745,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -3846,7 +3926,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -4104,7 +4184,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -4139,6 +4219,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -4311,6 +4395,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -4372,6 +4460,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -4464,6 +4556,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -4532,7 +4628,7 @@

Method Details

}, "mainClass": "A String", # The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris. "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class. - "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code. + "properties": { # Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code. "a_key": "A String", }, "savepointUri": "A String", # Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job. @@ -4790,7 +4886,7 @@

Method Details

"auxiliaryNodeGroups": [ # Optional. The node group settings. { # Node group identification and configuration information. "nodeGroup": { # Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource. # Required. Node group configuration. - "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labelsn. + "labels": { # Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels. "a_key": "A String", }, "name": "A String", # The Node group resource name (https://aip.dev/122). @@ -4825,6 +4921,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -4997,6 +5097,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -5058,6 +5162,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", @@ -5150,6 +5258,10 @@

Method Details

"vmCount": 42, # Output only. Number of VM provisioned with the machine_type. }, ], + "provisioningModelMix": { # Defines how Dataproc should create VMs with a mixture of provisioning models. # Optional. Defines how the Group selects the provisioning model to ensure required reliability. + "standardCapacityBase": 42, # Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. + "standardCapacityPercentAboveBase": 42, # Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot. + }, }, "instanceNames": [ # Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. "A String", diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.customModels.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.customModels.html index 371b92fe166..290b55aed18 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.customModels.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.customModels.html @@ -105,6 +105,7 @@

Method Details

{ # Metadata that describes a custom tuned model. "createTime": "A String", # Timestamp the Model was created at. "displayName": "A String", # The display name of the model. + "errorMessage": "A String", # Currently this is only populated if the model state is `INPUT_VALIDATION_FAILED`. "metrics": { # The metrics of the trained model. "a_key": 3.14, }, diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.html index 717951a41ca..a5a3e4a901b 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.html @@ -280,6 +280,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, } @@ -430,6 +432,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }
@@ -538,6 +542,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }, @@ -634,6 +640,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, } @@ -712,6 +720,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }
diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.html index 2ec61a880a5..45710b44f23 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.html @@ -155,6 +155,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "name": "A String", # Immutable. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. @@ -267,6 +268,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "name": "A String", # Immutable. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. @@ -320,6 +322,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "name": "A String", # Immutable. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. @@ -380,6 +383,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "name": "A String", # Immutable. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. @@ -422,6 +426,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "name": "A String", # Immutable. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. diff --git a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.html b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.html index 08defd3edba..78127e0efa2 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.html @@ -272,6 +272,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, } @@ -422,6 +424,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }
@@ -530,6 +534,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }, @@ -626,6 +632,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, } @@ -704,6 +712,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }
diff --git a/docs/dyn/discoveryengine_v1.projects.locations.html b/docs/dyn/discoveryengine_v1.projects.locations.html index 4631def3024..0be1bb6677c 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.html @@ -90,9 +90,9 @@

Instance Methods

Returns the groundingConfigs Resource.

- identity_mapping_stores() + identityMappingStores()

-

Returns the identity_mapping_stores Resource.

+

Returns the identityMappingStores Resource.

operations() diff --git a/docs/dyn/discoveryengine_v1.projects.locations.identityMappingStores.html b/docs/dyn/discoveryengine_v1.projects.locations.identityMappingStores.html new file mode 100644 index 00000000000..dcff2115650 --- /dev/null +++ b/docs/dyn/discoveryengine_v1.projects.locations.identityMappingStores.html @@ -0,0 +1,91 @@ + + + +

Discovery Engine API . projects . locations . identityMappingStores

+

Instance Methods

+

+ operations() +

+

Returns the operations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/discoveryengine_v1.projects.locations.identityMappingStores.operations.html b/docs/dyn/discoveryengine_v1.projects.locations.identityMappingStores.operations.html new file mode 100644 index 00000000000..6b2ea660cf2 --- /dev/null +++ b/docs/dyn/discoveryengine_v1.projects.locations.identityMappingStores.operations.html @@ -0,0 +1,187 @@ + + + +

Discovery Engine API . projects . locations . identityMappingStores . operations

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+
+Args:
+  name: string, The name of the operation resource. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation's parent resource. (required)
+  filter: string, The standard list filter.
+  pageSize: integer, The standard list page size.
+  pageToken: string, The standard list page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Operations.ListOperations.
+  "nextPageToken": "A String", # The standard List next-page token.
+  "operations": [ # A list of operations that matches the specified filter in the request.
+    { # This resource represents a long-running operation that is the result of a network API call.
+      "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+      "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ + \ No newline at end of file diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.customModels.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.customModels.html index 4b08b953552..8c7e7165c38 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.customModels.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.customModels.html @@ -105,6 +105,7 @@

Method Details

{ # Metadata that describes a custom tuned model. "createTime": "A String", # Timestamp the Model was created at. "displayName": "A String", # The display name of the model. + "errorMessage": "A String", # Currently this is only populated if the model state is `INPUT_VALIDATION_FAILED`. "metrics": { # The metrics of the trained model. "a_key": 3.14, }, diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.html index c739dd5ac2f..893755535d5 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.html @@ -322,6 +322,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, } @@ -508,6 +510,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }
@@ -703,6 +707,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }, @@ -835,6 +841,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, } @@ -949,6 +957,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }
diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html index 3ff72105c5a..b40cc51ad45 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.servingConfigs.html @@ -180,6 +180,12 @@

Method Details

], "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. This will be used to filter search results which may affect the Answer response. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customers might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) "maxReturnResults": 42, # Number of search results to return. The default value is 10. + "naturalLanguageQueryUnderstandingSpec": { # Specification to enable natural language understanding capabilities for search requests. # Optional. Specification to enable natural language understanding capabilities for search requests. + "filterExtractionCondition": "A String", # The condition under which filter extraction should occur. Default to Condition.DISABLED. + "geoSearchQueryDetectionFieldNames": [ # Field names used for location-based filtering, where geolocation filters are detected in natural language search queries. Only valid when the FilterExtractionCondition is set to `ENABLED`. If this field is set, it overrides the field names set in ServingConfig.geo_search_query_detection_field_names. + "A String", + ], + }, "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. 
For more information on ordering, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. "searchResultMode": "A String", # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`. See [parse and chunk documents](https://cloud.google.com/generative-ai-app-builder/docs/parse-chunk-documents) }, diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.html index d2c332a5442..b43b946c635 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.html @@ -164,6 +164,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. @@ -293,6 +294,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. @@ -363,6 +365,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. @@ -440,6 +443,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. @@ -499,6 +503,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. @@ -570,6 +575,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. @@ -641,6 +647,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "mediaRecommendationEngineConfig": { # Additional config specs for a Media Recommendation engine. # Configurations for the Media Engine. Only applicable on the data stores with solution_type SOLUTION_TYPE_RECOMMENDATION and IndustryVertical.MEDIA vertical. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html index 93149eb1d5e..cdfc0b548d1 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.servingConfigs.html @@ -180,6 +180,12 @@

Method Details

], "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. This will be used to filter search results which may affect the Answer response. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customers might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) "maxReturnResults": 42, # Number of search results to return. The default value is 10. + "naturalLanguageQueryUnderstandingSpec": { # Specification to enable natural language understanding capabilities for search requests. # Optional. Specification to enable natural language understanding capabilities for search requests. + "filterExtractionCondition": "A String", # The condition under which filter extraction should occur. Default to Condition.DISABLED. + "geoSearchQueryDetectionFieldNames": [ # Field names used for location-based filtering, where geolocation filters are detected in natural language search queries. Only valid when the FilterExtractionCondition is set to `ENABLED`. If this field is set, it overrides the field names set in ServingConfig.geo_search_query_detection_field_names. + "A String", + ], + }, "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. 
For more information on ordering, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. "searchResultMode": "A String", # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`. See [parse and chunk documents](https://cloud.google.com/generative-ai-app-builder/docs/parse-chunk-documents) }, diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.html index 50b429fb5c5..fac08d7bed4 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.html @@ -314,6 +314,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, } @@ -500,6 +502,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }
@@ -695,6 +699,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }, @@ -827,6 +833,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, } @@ -941,6 +949,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }
diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html index fb56420560b..c984cb18af0 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.servingConfigs.html @@ -180,6 +180,12 @@

Method Details

], "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. This will be used to filter search results which may affect the Answer response. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customers might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) "maxReturnResults": 42, # Number of search results to return. The default value is 10. + "naturalLanguageQueryUnderstandingSpec": { # Specification to enable natural language understanding capabilities for search requests. # Optional. Specification to enable natural language understanding capabilities for search requests. + "filterExtractionCondition": "A String", # The condition under which filter extraction should occur. Default to Condition.DISABLED. + "geoSearchQueryDetectionFieldNames": [ # Field names used for location-based filtering, where geolocation filters are detected in natural language search queries. Only valid when the FilterExtractionCondition is set to `ENABLED`. If this field is set, it overrides the field names set in ServingConfig.geo_search_query_detection_field_names. + "A String", + ], + }, "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. 
For more information on ordering, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. "searchResultMode": "A String", # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`. See [parse and chunk documents](https://cloud.google.com/generative-ai-app-builder/docs/parse-chunk-documents) }, diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.html index 43ac142966e..b4268c363a5 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.html @@ -95,9 +95,9 @@

Instance Methods

Returns the groundingConfigs Resource.

- identity_mapping_stores() + identityMappingStores()

-

Returns the identity_mapping_stores Resource.

+

Returns the identityMappingStores Resource.

operations() diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.identityMappingStores.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.identityMappingStores.html new file mode 100644 index 00000000000..87566478132 --- /dev/null +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.identityMappingStores.html @@ -0,0 +1,91 @@ + + + +

Discovery Engine API . projects . locations . identityMappingStores

+

Instance Methods

+

+ operations() +

+

Returns the operations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.identityMappingStores.operations.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.identityMappingStores.operations.html new file mode 100644 index 00000000000..567d42a2e55 --- /dev/null +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.identityMappingStores.operations.html @@ -0,0 +1,187 @@ + + + +

Discovery Engine API . projects . locations . identityMappingStores . operations

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+
+Args:
+  name: string, The name of the operation resource. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation's parent resource. (required)
+  filter: string, The standard list filter.
+  pageSize: integer, The standard list page size.
+  pageToken: string, The standard list page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Operations.ListOperations.
+  "nextPageToken": "A String", # The standard List next-page token.
+  "operations": [ # A list of operations that matches the specified filter in the request.
+    { # This resource represents a long-running operation that is the result of a network API call.
+      "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+      "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ + \ No newline at end of file diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.customModels.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.customModels.html index cd55149d9a5..cab73c7d78a 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.customModels.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.customModels.html @@ -105,6 +105,7 @@

Method Details

{ # Metadata that describes a custom tuned model. "createTime": "A String", # Timestamp the Model was created at. "displayName": "A String", # The display name of the model. + "errorMessage": "A String", # Currently this is only populated if the model state is `INPUT_VALIDATION_FAILED`. "metrics": { # The metrics of the trained model. "a_key": 3.14, }, diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.html index 989b01600c5..72d4b136dbc 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.html @@ -289,6 +289,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, } @@ -448,6 +450,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }
@@ -565,6 +569,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }, @@ -670,6 +676,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, } @@ -757,6 +765,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }
diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html index 526b8fb52cf..5275a0926b7 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.servingConfigs.html @@ -177,6 +177,12 @@

Method Details

], "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. This will be used to filter search results which may affect the Answer response. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customers might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) "maxReturnResults": 42, # Number of search results to return. The default value is 10. + "naturalLanguageQueryUnderstandingSpec": { # Specification to enable natural language understanding capabilities for search requests. # Optional. Specification to enable natural language understanding capabilities for search requests. + "filterExtractionCondition": "A String", # The condition under which filter extraction should occur. Default to Condition.DISABLED. + "geoSearchQueryDetectionFieldNames": [ # Field names used for location-based filtering, where geolocation filters are detected in natural language search queries. Only valid when the FilterExtractionCondition is set to `ENABLED`. If this field is set, it overrides the field names set in ServingConfig.geo_search_query_detection_field_names. + "A String", + ], + }, "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. 
For more information on ordering, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. "searchResultMode": "A String", # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`. See [parse and chunk documents](https://cloud.google.com/generative-ai-app-builder/docs/parse-chunk-documents) }, diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.html index 5abd1dab652..419370449b3 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.html @@ -164,6 +164,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "name": "A String", # Immutable. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. @@ -276,6 +277,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "name": "A String", # Immutable. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. @@ -329,6 +331,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "name": "A String", # Immutable. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. @@ -389,6 +392,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "name": "A String", # Immutable. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. @@ -431,6 +435,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "name": "A String", # Immutable. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. @@ -485,6 +490,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "name": "A String", # Immutable. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. @@ -539,6 +545,7 @@

Method Details

"dataStoreIds": [ # The data stores associated with this engine. For SOLUTION_TYPE_SEARCH and SOLUTION_TYPE_RECOMMENDATION type of engines, they can only associate with at most one data store. If solution_type is SOLUTION_TYPE_CHAT, multiple DataStores in the same Collection can be associated here. Note that when used in CreateEngineRequest, one DataStore id must be provided as the system will use it for necessary initializations. "A String", ], + "disableAnalytics": True or False, # Optional. Whether to disable analytics for searches performed on this engine. "displayName": "A String", # Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters. "industryVertical": "A String", # The industry vertical that the engine registers. The restriction of the Engine industry vertical is based on DataStore: If unspecified, default to `GENERIC`. Vertical on Engine has to match vertical of the DataStore linked to the engine. "name": "A String", # Immutable. The fully qualified resource name of the engine. This field must be a UTF-8 encoded string with a length limit of 1024 characters. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` engine should be 1-63 characters, and valid characters are /a-z0-9*/. Otherwise, an INVALID_ARGUMENT error is returned. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html index 52c1d2559eb..193edc71974 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.servingConfigs.html @@ -177,6 +177,12 @@

Method Details

], "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. This will be used to filter search results which may affect the Answer response. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customers might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) "maxReturnResults": 42, # Number of search results to return. The default value is 10. + "naturalLanguageQueryUnderstandingSpec": { # Specification to enable natural language understanding capabilities for search requests. # Optional. Specification to enable natural language understanding capabilities for search requests. + "filterExtractionCondition": "A String", # The condition under which filter extraction should occur. Default to Condition.DISABLED. + "geoSearchQueryDetectionFieldNames": [ # Field names used for location-based filtering, where geolocation filters are detected in natural language search queries. Only valid when the FilterExtractionCondition is set to `ENABLED`. If this field is set, it overrides the field names set in ServingConfig.geo_search_query_detection_field_names. + "A String", + ], + }, "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. 
For more information on ordering, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. "searchResultMode": "A String", # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`. See [parse and chunk documents](https://cloud.google.com/generative-ai-app-builder/docs/parse-chunk-documents) }, diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.html b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.html index 5e5fc04bd1f..93459d4fe2a 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.html @@ -281,6 +281,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, } @@ -440,6 +442,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }
@@ -557,6 +561,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }, @@ -662,6 +668,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, } @@ -749,6 +757,8 @@

Method Details

}, "workspaceConfig": { # Config to store data store type configuration for workspace data # Config to store data store type configuration for workspace data. This must be set when DataStore.content_config is set as DataStore.ContentConfig.GOOGLE_WORKSPACE. "dasherCustomerId": "A String", # Obfuscated Dasher customer ID. + "superAdminEmailAddress": "A String", # Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. + "superAdminServiceAccount": "A String", # Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion. "type": "A String", # The Google Workspace data source. }, }
diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html index 361692b3c17..90fdd9d331a 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.servingConfigs.html @@ -177,6 +177,12 @@

Method Details

], "filter": "A String", # The filter syntax consists of an expression language for constructing a predicate from one or more fields of the documents being filtered. Filter expression is case-sensitive. This will be used to filter search results which may affect the Answer response. If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. Filtering in Vertex AI Search is done by mapping the LHS filter key to a key property defined in the Vertex AI Search backend -- this mapping is defined by the customer in their schema. For example a media customers might have a field 'name' in their schema. In this case the filter would look like this: filter --> name:'ANY("king kong")' For more information about filtering including syntax and filter operators, see [Filter](https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata) "maxReturnResults": 42, # Number of search results to return. The default value is 10. + "naturalLanguageQueryUnderstandingSpec": { # Specification to enable natural language understanding capabilities for search requests. # Optional. Specification to enable natural language understanding capabilities for search requests. + "filterExtractionCondition": "A String", # The condition under which filter extraction should occur. Default to Condition.DISABLED. + "geoSearchQueryDetectionFieldNames": [ # Field names used for location-based filtering, where geolocation filters are detected in natural language search queries. Only valid when the FilterExtractionCondition is set to `ENABLED`. If this field is set, it overrides the field names set in ServingConfig.geo_search_query_detection_field_names. + "A String", + ], + }, "orderBy": "A String", # The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. 
For more information on ordering, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned. "searchResultMode": "A String", # Specifies the search result mode. If unspecified, the search result mode defaults to `DOCUMENTS`. See [parse and chunk documents](https://cloud.google.com/generative-ai-app-builder/docs/parse-chunk-documents) }, diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.html b/docs/dyn/discoveryengine_v1beta.projects.locations.html index baf2b1ee0fa..28592bba24c 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.html @@ -94,6 +94,11 @@

Instance Methods

Returns the groundingConfigs Resource.

+

+ identityMappingStores() +

+

Returns the identityMappingStores Resource.

+

operations()

diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.identityMappingStores.html b/docs/dyn/discoveryengine_v1beta.projects.locations.identityMappingStores.html new file mode 100644 index 00000000000..8ca74eb98aa --- /dev/null +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.identityMappingStores.html @@ -0,0 +1,91 @@ + + + +

Discovery Engine API . projects . locations . identityMappingStores

+

Instance Methods

+

+ operations() +

+

Returns the operations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.identityMappingStores.operations.html b/docs/dyn/discoveryengine_v1beta.projects.locations.identityMappingStores.operations.html new file mode 100644 index 00000000000..2440d68d55f --- /dev/null +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.identityMappingStores.operations.html @@ -0,0 +1,187 @@ + + + +

Discovery Engine API . projects . locations . identityMappingStores . operations

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+
+Args:
+  name: string, The name of the operation resource. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation's parent resource. (required)
+  filter: string, The standard list filter.
+  pageSize: integer, The standard list page size.
+  pageToken: string, The standard list page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Operations.ListOperations.
+  "nextPageToken": "A String", # The standard List next-page token.
+  "operations": [ # A list of operations that matches the specified filter in the request.
+    { # This resource represents a long-running operation that is the result of a network API call.
+      "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+      "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ + \ No newline at end of file diff --git a/docs/dyn/displayvideo_v3.advertisers.adGroups.html b/docs/dyn/displayvideo_v3.advertisers.adGroups.html index bac39c81ecf..8a50c7cf07a 100644 --- a/docs/dyn/displayvideo_v3.advertisers.adGroups.html +++ b/docs/dyn/displayvideo_v3.advertisers.adGroups.html @@ -508,7 +508,7 @@

Method Details

"targetingExpansion": { # Settings that control the [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. # The [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the ad group. "audienceExpansionLevel": "A String", # Output only. Magnitude of expansion for eligible first-party user lists under this ad group. This field only applies to YouTube and Partners line item and ad group resources. "audienceExpansionSeedListExcluded": True or False, # Output only. Whether to exclude seed list for audience expansion. This field only applies to YouTube and Partners line item and ad group resources. - "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. *Warning*: Starting on **September 30, 2024**, optimized targeting will no longer be compatible with a subset of bid strategies. [Read more about this announced change](/display-video/api/deprecations#features.ot_bid_strategies). + "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. Optimized targeting is not compatible with all bid strategies. Attempting to set this field to `true` for a line item using one of the following combinations of BiddingStrategy fields and BiddingStrategyPerformanceGoalType will result in an error: maximize_auto_spend_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_CIVA` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_IVO_TEN` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_AV_VIEWED` performance_goal_auto_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_VIEWABLE_CPM` }, }
@@ -580,7 +580,7 @@

Method Details

"targetingExpansion": { # Settings that control the [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. # The [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the ad group. "audienceExpansionLevel": "A String", # Output only. Magnitude of expansion for eligible first-party user lists under this ad group. This field only applies to YouTube and Partners line item and ad group resources. "audienceExpansionSeedListExcluded": True or False, # Output only. Whether to exclude seed list for audience expansion. This field only applies to YouTube and Partners line item and ad group resources. - "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. *Warning*: Starting on **September 30, 2024**, optimized targeting will no longer be compatible with a subset of bid strategies. [Read more about this announced change](/display-video/api/deprecations#features.ot_bid_strategies). + "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. Optimized targeting is not compatible with all bid strategies. Attempting to set this field to `true` for a line item using one of the following combinations of BiddingStrategy fields and BiddingStrategyPerformanceGoalType will result in an error: maximize_auto_spend_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_CIVA` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_IVO_TEN` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_AV_VIEWED` performance_goal_auto_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_VIEWABLE_CPM` }, }, ], diff --git a/docs/dyn/displayvideo_v3.advertisers.lineItems.html b/docs/dyn/displayvideo_v3.advertisers.lineItems.html index 2cd6a0577d5..146039cc301 100644 --- a/docs/dyn/displayvideo_v3.advertisers.lineItems.html +++ b/docs/dyn/displayvideo_v3.advertisers.lineItems.html @@ -940,7 +940,7 @@

Method Details

"targetingExpansion": { # Settings that control the [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. # The [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. This config is only applicable for display, video, or audio line items that use automated bidding and positively target eligible audience lists. "audienceExpansionLevel": "A String", # Output only. Magnitude of expansion for eligible first-party user lists under this ad group. This field only applies to YouTube and Partners line item and ad group resources. "audienceExpansionSeedListExcluded": True or False, # Output only. Whether to exclude seed list for audience expansion. This field only applies to YouTube and Partners line item and ad group resources. - "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. *Warning*: Starting on **September 30, 2024**, optimized targeting will no longer be compatible with a subset of bid strategies. [Read more about this announced change](/display-video/api/deprecations#features.ot_bid_strategies). + "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. Optimized targeting is not compatible with all bid strategies. Attempting to set this field to `true` for a line item using one of the following combinations of BiddingStrategy fields and BiddingStrategyPerformanceGoalType will result in an error: maximize_auto_spend_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_CIVA` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_IVO_TEN` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_AV_VIEWED` performance_goal_auto_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_VIEWABLE_CPM` }, "updateTime": "A String", # Output only. The timestamp when the line item was last updated. Assigned by the system. "warningMessages": [ # Output only. The warning messages generated by the line item. 
These warnings do not block saving the line item, but some may block the line item from running. @@ -1165,7 +1165,7 @@

Method Details

"targetingExpansion": { # Settings that control the [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. # The [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. This config is only applicable for display, video, or audio line items that use automated bidding and positively target eligible audience lists. "audienceExpansionLevel": "A String", # Output only. Magnitude of expansion for eligible first-party user lists under this ad group. This field only applies to YouTube and Partners line item and ad group resources. "audienceExpansionSeedListExcluded": True or False, # Output only. Whether to exclude seed list for audience expansion. This field only applies to YouTube and Partners line item and ad group resources. - "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. *Warning*: Starting on **September 30, 2024**, optimized targeting will no longer be compatible with a subset of bid strategies. [Read more about this announced change](/display-video/api/deprecations#features.ot_bid_strategies). + "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. Optimized targeting is not compatible with all bid strategies. Attempting to set this field to `true` for a line item using one of the following combinations of BiddingStrategy fields and BiddingStrategyPerformanceGoalType will result in an error: maximize_auto_spend_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_CIVA` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_IVO_TEN` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_AV_VIEWED` performance_goal_auto_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_VIEWABLE_CPM` }, "updateTime": "A String", # Output only. The timestamp when the line item was last updated. Assigned by the system. "warningMessages": [ # Output only. The warning messages generated by the line item. 
These warnings do not block saving the line item, but some may block the line item from running. @@ -1350,7 +1350,7 @@

Method Details

"targetingExpansion": { # Settings that control the [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. # The [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. This config is only applicable for display, video, or audio line items that use automated bidding and positively target eligible audience lists. "audienceExpansionLevel": "A String", # Output only. Magnitude of expansion for eligible first-party user lists under this ad group. This field only applies to YouTube and Partners line item and ad group resources. "audienceExpansionSeedListExcluded": True or False, # Output only. Whether to exclude seed list for audience expansion. This field only applies to YouTube and Partners line item and ad group resources. - "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. *Warning*: Starting on **September 30, 2024**, optimized targeting will no longer be compatible with a subset of bid strategies. [Read more about this announced change](/display-video/api/deprecations#features.ot_bid_strategies). + "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. Optimized targeting is not compatible with all bid strategies. Attempting to set this field to `true` for a line item using one of the following combinations of BiddingStrategy fields and BiddingStrategyPerformanceGoalType will result in an error: maximize_auto_spend_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_CIVA` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_IVO_TEN` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_AV_VIEWED` performance_goal_auto_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_VIEWABLE_CPM` }, "updateTime": "A String", # Output only. The timestamp when the line item was last updated. Assigned by the system. "warningMessages": [ # Output only. The warning messages generated by the line item. 
These warnings do not block saving the line item, but some may block the line item from running. @@ -1603,7 +1603,7 @@

Method Details

"targetingExpansion": { # Settings that control the [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. # The [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. This config is only applicable for display, video, or audio line items that use automated bidding and positively target eligible audience lists. "audienceExpansionLevel": "A String", # Output only. Magnitude of expansion for eligible first-party user lists under this ad group. This field only applies to YouTube and Partners line item and ad group resources. "audienceExpansionSeedListExcluded": True or False, # Output only. Whether to exclude seed list for audience expansion. This field only applies to YouTube and Partners line item and ad group resources. - "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. *Warning*: Starting on **September 30, 2024**, optimized targeting will no longer be compatible with a subset of bid strategies. [Read more about this announced change](/display-video/api/deprecations#features.ot_bid_strategies). + "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. Optimized targeting is not compatible with all bid strategies. Attempting to set this field to `true` for a line item using one of the following combinations of BiddingStrategy fields and BiddingStrategyPerformanceGoalType will result in an error: maximize_auto_spend_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_CIVA` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_IVO_TEN` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_AV_VIEWED` performance_goal_auto_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_VIEWABLE_CPM` }, "updateTime": "A String", # Output only. The timestamp when the line item was last updated. Assigned by the system. "warningMessages": [ # Output only. The warning messages generated by the line item. 
These warnings do not block saving the line item, but some may block the line item from running. @@ -1796,7 +1796,7 @@

Method Details

"targetingExpansion": { # Settings that control the [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. # The [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. This config is only applicable for display, video, or audio line items that use automated bidding and positively target eligible audience lists. "audienceExpansionLevel": "A String", # Output only. Magnitude of expansion for eligible first-party user lists under this ad group. This field only applies to YouTube and Partners line item and ad group resources. "audienceExpansionSeedListExcluded": True or False, # Output only. Whether to exclude seed list for audience expansion. This field only applies to YouTube and Partners line item and ad group resources. - "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. *Warning*: Starting on **September 30, 2024**, optimized targeting will no longer be compatible with a subset of bid strategies. [Read more about this announced change](/display-video/api/deprecations#features.ot_bid_strategies). + "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. Optimized targeting is not compatible with all bid strategies. Attempting to set this field to `true` for a line item using one of the following combinations of BiddingStrategy fields and BiddingStrategyPerformanceGoalType will result in an error: maximize_auto_spend_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_CIVA` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_IVO_TEN` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_AV_VIEWED` performance_goal_auto_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_VIEWABLE_CPM` }, "updateTime": "A String", # Output only. The timestamp when the line item was last updated. Assigned by the system. "warningMessages": [ # Output only. The warning messages generated by the line item. 
These warnings do not block saving the line item, but some may block the line item from running. @@ -1994,7 +1994,7 @@

Method Details

"targetingExpansion": { # Settings that control the [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. # The [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. This config is only applicable for display, video, or audio line items that use automated bidding and positively target eligible audience lists. "audienceExpansionLevel": "A String", # Output only. Magnitude of expansion for eligible first-party user lists under this ad group. This field only applies to YouTube and Partners line item and ad group resources. "audienceExpansionSeedListExcluded": True or False, # Output only. Whether to exclude seed list for audience expansion. This field only applies to YouTube and Partners line item and ad group resources. - "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. *Warning*: Starting on **September 30, 2024**, optimized targeting will no longer be compatible with a subset of bid strategies. [Read more about this announced change](/display-video/api/deprecations#features.ot_bid_strategies). + "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. Optimized targeting is not compatible with all bid strategies. Attempting to set this field to `true` for a line item using one of the following combinations of BiddingStrategy fields and BiddingStrategyPerformanceGoalType will result in an error: maximize_auto_spend_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_CIVA` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_IVO_TEN` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_AV_VIEWED` performance_goal_auto_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_VIEWABLE_CPM` }, "updateTime": "A String", # Output only. The timestamp when the line item was last updated. Assigned by the system. "warningMessages": [ # Output only. The warning messages generated by the line item. 
These warnings do not block saving the line item, but some may block the line item from running. @@ -2199,7 +2199,7 @@

Method Details

"targetingExpansion": { # Settings that control the [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. # The [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. This config is only applicable for display, video, or audio line items that use automated bidding and positively target eligible audience lists. "audienceExpansionLevel": "A String", # Output only. Magnitude of expansion for eligible first-party user lists under this ad group. This field only applies to YouTube and Partners line item and ad group resources. "audienceExpansionSeedListExcluded": True or False, # Output only. Whether to exclude seed list for audience expansion. This field only applies to YouTube and Partners line item and ad group resources. - "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. *Warning*: Starting on **September 30, 2024**, optimized targeting will no longer be compatible with a subset of bid strategies. [Read more about this announced change](/display-video/api/deprecations#features.ot_bid_strategies). + "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. Optimized targeting is not compatible with all bid strategies. Attempting to set this field to `true` for a line item using one of the following combinations of BiddingStrategy fields and BiddingStrategyPerformanceGoalType will result in an error: maximize_auto_spend_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_CIVA` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_IVO_TEN` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_AV_VIEWED` performance_goal_auto_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_VIEWABLE_CPM` }, "updateTime": "A String", # Output only. The timestamp when the line item was last updated. Assigned by the system. "warningMessages": [ # Output only. The warning messages generated by the line item. 
These warnings do not block saving the line item, but some may block the line item from running. @@ -2385,7 +2385,7 @@

Method Details

"targetingExpansion": { # Settings that control the [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. # The [optimized targeting](//support.google.com/displayvideo/answer/12060859) settings of the line item. This config is only applicable for display, video, or audio line items that use automated bidding and positively target eligible audience lists. "audienceExpansionLevel": "A String", # Output only. Magnitude of expansion for eligible first-party user lists under this ad group. This field only applies to YouTube and Partners line item and ad group resources. "audienceExpansionSeedListExcluded": True or False, # Output only. Whether to exclude seed list for audience expansion. This field only applies to YouTube and Partners line item and ad group resources. - "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. *Warning*: Starting on **September 30, 2024**, optimized targeting will no longer be compatible with a subset of bid strategies. [Read more about this announced change](/display-video/api/deprecations#features.ot_bid_strategies). + "enableOptimizedTargeting": True or False, # Required. Whether to enable Optimized Targeting for the line item. Optimized targeting is not compatible with all bid strategies. Attempting to set this field to `true` for a line item using one of the following combinations of BiddingStrategy fields and BiddingStrategyPerformanceGoalType will result in an error: maximize_auto_spend_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_CIVA` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_IVO_TEN` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_AV_VIEWED` performance_goal_auto_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_VIEWABLE_CPM` }, "updateTime": "A String", # Output only. The timestamp when the line item was last updated. Assigned by the system. "warningMessages": [ # Output only. The warning messages generated by the line item. 
These warnings do not block saving the line item, but some may block the line item from running. diff --git a/docs/dyn/firebaseappcheck_v1.oauthClients.html b/docs/dyn/firebaseappcheck_v1.oauthClients.html index 518793277f6..b62c69cb3b2 100644 --- a/docs/dyn/firebaseappcheck_v1.oauthClients.html +++ b/docs/dyn/firebaseappcheck_v1.oauthClients.html @@ -119,8 +119,8 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. }
@@ -150,8 +150,8 @@

Method Details

An object of the form: { # Response message for the ExchangeAppAttestAttestation method. - "appCheckToken": { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. # Encapsulates an App Check token. - "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK. + "appCheckToken": { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. # Encapsulates an App Check token. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. }, "artifact": "A String", # An artifact that can be used in future calls to ExchangeAppAttestAssertion. @@ -180,8 +180,8 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. }
diff --git a/docs/dyn/firebaseappcheck_v1.projects.apps.html b/docs/dyn/firebaseappcheck_v1.projects.apps.html index 7eed8c28614..9cbcf64883a 100644 --- a/docs/dyn/firebaseappcheck_v1.projects.apps.html +++ b/docs/dyn/firebaseappcheck_v1.projects.apps.html @@ -175,8 +175,8 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. }
@@ -206,8 +206,8 @@

Method Details

An object of the form: { # Response message for the ExchangeAppAttestAttestation method. - "appCheckToken": { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. # Encapsulates an App Check token. - "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK. + "appCheckToken": { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. # Encapsulates an App Check token. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. }, "artifact": "A String", # An artifact that can be used in future calls to ExchangeAppAttestAssertion. @@ -236,8 +236,8 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. }
@@ -264,8 +264,8 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. }
@@ -292,8 +292,8 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. }
@@ -320,8 +320,8 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. }
@@ -348,8 +348,8 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. }
@@ -376,8 +376,8 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. }
@@ -403,8 +403,8 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. }
diff --git a/docs/dyn/firebaseappcheck_v1.projects.services.html b/docs/dyn/firebaseappcheck_v1.projects.services.html index 2046620e6ea..4e379859beb 100644 --- a/docs/dyn/firebaseappcheck_v1.projects.services.html +++ b/docs/dyn/firebaseappcheck_v1.projects.services.html @@ -110,9 +110,9 @@

Method Details

{ # Request message for the BatchUpdateServices method. "requests": [ # Required. The request messages specifying the Services to update. A maximum of 100 objects can be updated in a batch. { # Request message for the UpdateService method as well as an individual update message for the BatchUpdateServices method. - "service": { # The enforcement configuration for a Firebase service supported by App Check. # Required. The Service to update. The Service's `name` field is used to identify the Service to be updated, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) + "service": { # The enforcement configuration for a Firebase service supported by App Check. # Required. The Service to update. The Service's `name` field is used to identify the Service to be updated, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `oauth2.googleapis.com` (Google Identity for iOS) "enforcementMode": "A String", # Required. The App Check enforcement mode for this service. - "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. 
Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) + "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `oauth2.googleapis.com` (Google Identity for iOS) }, "updateMask": "A String", # Required. A comma-separated list of names of fields in the Service to update. Example: `enforcement_mode`. }, @@ -132,7 +132,7 @@

Method Details

"services": [ # Service objects after the updates have been applied. { # The enforcement configuration for a Firebase service supported by App Check. "enforcementMode": "A String", # Required. The App Check enforcement mode for this service. - "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) + "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `oauth2.googleapis.com` (Google Identity for iOS) }, ], }
@@ -148,7 +148,7 @@

Method Details

Gets the Service configuration for the specified service name.
 
 Args:
-  name: string, Required. The relative resource name of the Service to retrieve, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) (required)
+  name: string, Required. The relative resource name of the Service to retrieve, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `oauth2.googleapis.com` (Google Identity for iOS) (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -159,7 +159,7 @@ 

Method Details

{ # The enforcement configuration for a Firebase service supported by App Check. "enforcementMode": "A String", # Required. The App Check enforcement mode for this service. - "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) + "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `oauth2.googleapis.com` (Google Identity for iOS) }
@@ -184,7 +184,7 @@

Method Details

"services": [ # The Services retrieved. { # The enforcement configuration for a Firebase service supported by App Check. "enforcementMode": "A String", # Required. The App Check enforcement mode for this service. - "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) + "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `oauth2.googleapis.com` (Google Identity for iOS) }, ], }
@@ -209,13 +209,13 @@

Method Details

Updates the specified Service configuration.
 
 Args:
-  name: string, Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) (required)
+  name: string, Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `oauth2.googleapis.com` (Google Identity for iOS) (required)
   body: object, The request body.
     The object takes the form of:
 
 { # The enforcement configuration for a Firebase service supported by App Check.
   "enforcementMode": "A String", # Required. The App Check enforcement mode for this service.
-  "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore)
+  "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `oauth2.googleapis.com` (Google Identity for iOS)
 }
 
   updateMask: string, Required. A comma-separated list of names of fields in the Service to update. Example: `enforcement_mode`.
@@ -229,7 +229,7 @@ 

Method Details

{ # The enforcement configuration for a Firebase service supported by App Check. "enforcementMode": "A String", # Required. The App Check enforcement mode for this service. - "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) + "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `oauth2.googleapis.com` (Google Identity for iOS) }
diff --git a/docs/dyn/firebaseappcheck_v1.projects.services.resourcePolicies.html b/docs/dyn/firebaseappcheck_v1.projects.services.resourcePolicies.html index c4dd766f733..cc7d736e763 100644 --- a/docs/dyn/firebaseappcheck_v1.projects.services.resourcePolicies.html +++ b/docs/dyn/firebaseappcheck_v1.projects.services.resourcePolicies.html @@ -111,11 +111,11 @@

Method Details

{ # Request message for the BatchUpdateResourcePolicies method. "requests": [ # Required. The request messages specifying the ResourcePolicy objects to update. A maximum of 100 objects can be updated in a batch. { # Request message for the UpdateResourcePolicy method as well as an individual update message for the BatchUpdateResourcePolicies method. - "resourcePolicy": { # App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration. # Required. The ResourcePolicy to update. The ResourcePolicy's `name` field is used to identify the ResourcePolicy to be updated, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) + "resourcePolicy": { # App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration. # Required. The ResourcePolicy to update. The ResourcePolicy's `name` field is used to identify the ResourcePolicy to be updated, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) "enforcementMode": "A String", # Required. The App Check enforcement mode for this resource. This will override the EnforcementMode setting on the service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. 
"name": "A String", # Required. Identifier. The relative name of the resource policy object, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) `resource_policy_id` is a system-generated UID. - "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. + "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. "updateTime": "A String", # Output only. Timestamp when this resource policy configuration object was most recently updated. }, "updateMask": "A String", # Required. A comma-separated list of names of fields in the ResourcePolicy to update. Example: `enforcement_mode`. @@ -134,11 +134,11 @@

Method Details

{ # Response message for the BatchUpdateResourcePolicies method. "resourcePolicies": [ # ResourcePolicy objects after the updates have been applied. - { # App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration. + { # App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration. "enforcementMode": "A String", # Required. The App Check enforcement mode for this resource. This will override the EnforcementMode setting on the service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. "name": "A String", # Required. Identifier. The relative name of the resource policy object, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) `resource_policy_id` is a system-generated UID. - "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. + "targetResource": "A String", # Required. 
Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. "updateTime": "A String", # Output only. Timestamp when this resource policy configuration object was most recently updated. }, ], @@ -159,11 +159,11 @@

Method Details

body: object, The request body. The object takes the form of: -{ # App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration. +{ # App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration. "enforcementMode": "A String", # Required. The App Check enforcement mode for this resource. This will override the EnforcementMode setting on the service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. "name": "A String", # Required. Identifier. The relative name of the resource policy object, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) `resource_policy_id` is a system-generated UID. - "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. + "targetResource": "A String", # Required. 
Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. "updateTime": "A String", # Output only. Timestamp when this resource policy configuration object was most recently updated. } @@ -175,11 +175,11 @@

Method Details

Returns: An object of the form: - { # App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration. + { # App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration. "enforcementMode": "A String", # Required. The App Check enforcement mode for this resource. This will override the EnforcementMode setting on the service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. "name": "A String", # Required. Identifier. The relative name of the resource policy object, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) `resource_policy_id` is a system-generated UID. - "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. + "targetResource": "A String", # Required. 
Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. "updateTime": "A String", # Output only. Timestamp when this resource policy configuration object was most recently updated. } @@ -217,11 +217,11 @@

Method Details

Returns: An object of the form: - { # App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration. + { # App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration. "enforcementMode": "A String", # Required. The App Check enforcement mode for this resource. This will override the EnforcementMode setting on the service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. "name": "A String", # Required. Identifier. The relative name of the resource policy object, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) `resource_policy_id` is a system-generated UID. - "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. + "targetResource": "A String", # Required. 
Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. "updateTime": "A String", # Output only. Timestamp when this resource policy configuration object was most recently updated. } @@ -246,11 +246,11 @@

Method Details

{ # Response message for the ListResourcePolicies method. "nextPageToken": "A String", # If the result list is too large to fit in a single response, then a token is returned. If the string is empty or omitted, then this response is the last page of results. This token can be used in a subsequent call to ListResourcePolicies to find the next group of ResourcePolicy objects. Page tokens are short-lived and should not be persisted. "resourcePolicies": [ # The ResourcePolicy objects retrieved. - { # App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration. + { # App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration. "enforcementMode": "A String", # Required. The App Check enforcement mode for this resource. This will override the EnforcementMode setting on the service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. "name": "A String", # Required. Identifier. The relative name of the resource policy object, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) `resource_policy_id` is a system-generated UID. - "targetResource": "A String", # Required. 
Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. + "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. "updateTime": "A String", # Output only. Timestamp when this resource policy configuration object was most recently updated. }, ], @@ -280,11 +280,11 @@

Method Details

body: object, The request body. The object takes the form of: -{ # App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration. +{ # App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration. "enforcementMode": "A String", # Required. The App Check enforcement mode for this resource. This will override the EnforcementMode setting on the service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. "name": "A String", # Required. Identifier. The relative name of the resource policy object, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) `resource_policy_id` is a system-generated UID. - "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. + "targetResource": "A String", # Required. 
Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. "updateTime": "A String", # Output only. Timestamp when this resource policy configuration object was most recently updated. } @@ -297,11 +297,11 @@

Method Details

Returns: An object of the form: - { # App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration. + { # App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration. "enforcementMode": "A String", # Required. The App Check enforcement mode for this resource. This will override the EnforcementMode setting on the service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. "name": "A String", # Required. Identifier. The relative name of the resource policy object, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) `resource_policy_id` is a system-generated UID. - "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. + "targetResource": "A String", # Required. 
Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. "updateTime": "A String", # Output only. Timestamp when this resource policy configuration object was most recently updated. } diff --git a/docs/dyn/firebaseappcheck_v1beta.oauthClients.html b/docs/dyn/firebaseappcheck_v1beta.oauthClients.html index 81620c2dea0..2692b512f45 100644 --- a/docs/dyn/firebaseappcheck_v1beta.oauthClients.html +++ b/docs/dyn/firebaseappcheck_v1beta.oauthClients.html @@ -119,9 +119,9 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "attestationToken": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. - "token": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "attestationToken": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. } @@ -151,9 +151,9 @@

Method Details

An object of the form: { # Response message for the ExchangeAppAttestAttestation method. - "appCheckToken": { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. # Encapsulates an App Check token. - "attestationToken": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. - "token": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. + "appCheckToken": { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. # Encapsulates an App Check token. + "attestationToken": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. 
This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. }, "artifact": "A String", # An artifact that can be used in future calls to ExchangeAppAttestAssertion. @@ -186,9 +186,9 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "attestationToken": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. - "token": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "attestationToken": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. 
} diff --git a/docs/dyn/firebaseappcheck_v1beta.projects.apps.html b/docs/dyn/firebaseappcheck_v1beta.projects.apps.html index 8f92a68c993..4157ade1006 100644 --- a/docs/dyn/firebaseappcheck_v1beta.projects.apps.html +++ b/docs/dyn/firebaseappcheck_v1beta.projects.apps.html @@ -183,9 +183,9 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "attestationToken": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. - "token": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "attestationToken": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. } @@ -215,9 +215,9 @@

Method Details

An object of the form: { # Response message for the ExchangeAppAttestAttestation method. - "appCheckToken": { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. # Encapsulates an App Check token. - "attestationToken": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. - "token": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. + "appCheckToken": { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. # Encapsulates an App Check token. + "attestationToken": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. 
This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. }, "artifact": "A String", # An artifact that can be used in future calls to ExchangeAppAttestAssertion. @@ -250,9 +250,9 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "attestationToken": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. - "token": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "attestationToken": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. } @@ -279,9 +279,9 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "attestationToken": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. - "token": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "attestationToken": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. } @@ -308,9 +308,9 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "attestationToken": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. - "token": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "attestationToken": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. } @@ -337,9 +337,9 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "attestationToken": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. - "token": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "attestationToken": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. } @@ -366,9 +366,9 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "attestationToken": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. - "token": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "attestationToken": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. } @@ -394,9 +394,9 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "attestationToken": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. - "token": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "attestationToken": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. } @@ -423,9 +423,9 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "attestationToken": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. - "token": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "attestationToken": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. } @@ -451,9 +451,9 @@

Method Details

Returns: An object of the form: - { # Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check. - "attestationToken": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. - "token": "A String", # An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. + { # Encapsulates an *App Check token*, which are used to access backend services protected by App Check. + "attestationToken": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. + "token": "A String", # The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries. "ttl": "A String", # The duration from the time this token is minted until its expiration. This field is intended to ease client-side token management, since the client may have clock skew, but is still able to accurately measure a duration. 
} diff --git a/docs/dyn/firebaseappcheck_v1beta.projects.services.html b/docs/dyn/firebaseappcheck_v1beta.projects.services.html index e0e3f483ea2..dc0bec1b23c 100644 --- a/docs/dyn/firebaseappcheck_v1beta.projects.services.html +++ b/docs/dyn/firebaseappcheck_v1beta.projects.services.html @@ -113,7 +113,7 @@

Method Details

"service": { # The enforcement configuration for a Firebase service supported by App Check. # Required. The Service to update. The Service's `name` field is used to identify the Service to be updated, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform) * `oauth2.googleapis.com` (Google Identity for iOS) For Firebase Authentication to work with App Check, you must first upgrade to [Firebase Authentication with Identity Platform](https://firebase.google.com/docs/auth#identity-platform). "enforcementMode": "A String", # Required. The App Check enforcement mode for this service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. - "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform) + "name": "A String", # Required. 
The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform) * `oauth2.googleapis.com` (Google Identity for iOS) "updateTime": "A String", # Output only. Timestamp when this service configuration object was most recently updated. }, "updateMask": "A String", # Required. A comma-separated list of names of fields in the Service to update. Example: `enforcement_mode`. @@ -135,7 +135,7 @@

Method Details

{ # The enforcement configuration for a Firebase service supported by App Check. "enforcementMode": "A String", # Required. The App Check enforcement mode for this service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. - "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform) + "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform) * `oauth2.googleapis.com` (Google Identity for iOS) "updateTime": "A String", # Output only. Timestamp when this service configuration object was most recently updated. }, ], @@ -164,7 +164,7 @@

Method Details

{ # The enforcement configuration for a Firebase service supported by App Check. "enforcementMode": "A String", # Required. The App Check enforcement mode for this service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. - "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform) + "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform) * `oauth2.googleapis.com` (Google Identity for iOS) "updateTime": "A String", # Output only. Timestamp when this service configuration object was most recently updated. } @@ -191,7 +191,7 @@

Method Details

{ # The enforcement configuration for a Firebase service supported by App Check. "enforcementMode": "A String", # Required. The App Check enforcement mode for this service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. - "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform) + "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform) * `oauth2.googleapis.com` (Google Identity for iOS) "updateTime": "A String", # Output only. Timestamp when this service configuration object was most recently updated. }, ], @@ -217,14 +217,14 @@

Method Details

Updates the specified Service configuration.
 
 Args:
-  name: string, Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform) (required)
+  name: string, Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform) * `oauth2.googleapis.com` (Google Identity for iOS) (required)
   body: object, The request body.
     The object takes the form of:
 
 { # The enforcement configuration for a Firebase service supported by App Check.
   "enforcementMode": "A String", # Required. The App Check enforcement mode for this service.
   "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232.
-  "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform)
+  "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform) * `oauth2.googleapis.com` (Google Identity for iOS)
   "updateTime": "A String", # Output only. Timestamp when this service configuration object was most recently updated.
 }
 
@@ -240,7 +240,7 @@ 

Method Details

{ # The enforcement configuration for a Firebase service supported by App Check. "enforcementMode": "A String", # Required. The App Check enforcement mode for this service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. - "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform) + "name": "A String", # Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform) * `oauth2.googleapis.com` (Google Identity for iOS) "updateTime": "A String", # Output only. Timestamp when this service configuration object was most recently updated. }
diff --git a/docs/dyn/firebaseappcheck_v1beta.projects.services.resourcePolicies.html b/docs/dyn/firebaseappcheck_v1beta.projects.services.resourcePolicies.html index 34d404b2bf9..44e12841f87 100644 --- a/docs/dyn/firebaseappcheck_v1beta.projects.services.resourcePolicies.html +++ b/docs/dyn/firebaseappcheck_v1beta.projects.services.resourcePolicies.html @@ -111,11 +111,11 @@

Method Details

{ # Request message for the BatchUpdateResourcePolicies method. "requests": [ # Required. The request messages specifying the ResourcePolicy objects to update. A maximum of 100 objects can be updated in a batch. { # Request message for the UpdateResourcePolicy method as well as an individual update message for the BatchUpdateResourcePolicies method. - "resourcePolicy": { # App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration. # Required. The ResourcePolicy to update. The ResourcePolicy's `name` field is used to identify the ResourcePolicy to be updated, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) + "resourcePolicy": { # App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration. # Required. The ResourcePolicy to update. The ResourcePolicy's `name` field is used to identify the ResourcePolicy to be updated, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) "enforcementMode": "A String", # Required. The App Check enforcement mode for this resource. This will override the EnforcementMode setting on the parent service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. 
"name": "A String", # Required. Identifier. The relative name of the resource policy object, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) `resource_policy_id` is a system-generated UID. - "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. + "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. "updateTime": "A String", # Output only. Timestamp when this resource policy configuration object was most recently updated. }, "updateMask": "A String", # Required. A comma-separated list of names of fields in the ResourcePolicy to update. Example: `enforcement_mode`. @@ -134,11 +134,11 @@

Method Details

{ # Response message for the BatchUpdateResourcePolicies method. "resourcePolicies": [ # ResourcePolicy objects after the updates have been applied. - { # App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration. + { # App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration. "enforcementMode": "A String", # Required. The App Check enforcement mode for this resource. This will override the EnforcementMode setting on the parent service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. "name": "A String", # Required. Identifier. The relative name of the resource policy object, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) `resource_policy_id` is a system-generated UID. - "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. + "targetResource": "A String", # Required. 
Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. "updateTime": "A String", # Output only. Timestamp when this resource policy configuration object was most recently updated. }, ], @@ -159,11 +159,11 @@

Method Details

body: object, The request body. The object takes the form of: -{ # App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration. +{ # App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration. "enforcementMode": "A String", # Required. The App Check enforcement mode for this resource. This will override the EnforcementMode setting on the parent service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. "name": "A String", # Required. Identifier. The relative name of the resource policy object, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) `resource_policy_id` is a system-generated UID. - "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. + "targetResource": "A String", # Required. 
Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. "updateTime": "A String", # Output only. Timestamp when this resource policy configuration object was most recently updated. } @@ -175,11 +175,11 @@

Method Details

Returns: An object of the form: - { # App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration. + { # App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration. "enforcementMode": "A String", # Required. The App Check enforcement mode for this resource. This will override the EnforcementMode setting on the parent service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. "name": "A String", # Required. Identifier. The relative name of the resource policy object, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) `resource_policy_id` is a system-generated UID. - "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. + "targetResource": "A String", # Required. 
Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. "updateTime": "A String", # Output only. Timestamp when this resource policy configuration object was most recently updated. } @@ -217,11 +217,11 @@

Method Details

Returns: An object of the form: - { # App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration. + { # App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration. "enforcementMode": "A String", # Required. The App Check enforcement mode for this resource. This will override the EnforcementMode setting on the parent service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. "name": "A String", # Required. Identifier. The relative name of the resource policy object, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) `resource_policy_id` is a system-generated UID. - "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. + "targetResource": "A String", # Required. 
Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. "updateTime": "A String", # Output only. Timestamp when this resource policy configuration object was most recently updated. } @@ -246,11 +246,11 @@

Method Details

{ # Response message for the ListResourcePolicies method. "nextPageToken": "A String", # If the result list is too large to fit in a single response, then a token is returned. If the string is empty or omitted, then this response is the last page of results. This token can be used in a subsequent call to ListResourcePolicies to find the next group of ResourcePolicy objects. Page tokens are short-lived and should not be persisted. "resourcePolicies": [ # The ResourcePolicy objects retrieved. - { # App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration. + { # App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration. "enforcementMode": "A String", # Required. The App Check enforcement mode for this resource. This will override the EnforcementMode setting on the parent service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. "name": "A String", # Required. Identifier. The relative name of the resource policy object, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) `resource_policy_id` is a system-generated UID. - "targetResource": "A String", # Required. 
Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. + "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. "updateTime": "A String", # Output only. Timestamp when this resource policy configuration object was most recently updated. }, ], @@ -280,11 +280,11 @@

Method Details

body: object, The request body. The object takes the form of: -{ # App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration. +{ # App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration. "enforcementMode": "A String", # Required. The App Check enforcement mode for this resource. This will override the EnforcementMode setting on the parent service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. "name": "A String", # Required. Identifier. The relative name of the resource policy object, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) `resource_policy_id` is a system-generated UID. - "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. + "targetResource": "A String", # Required. 
Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. "updateTime": "A String", # Output only. Timestamp when this resource policy configuration object was most recently updated. } @@ -297,11 +297,11 @@

Method Details

Returns: An object of the form: - { # App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration. + { # App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration. "enforcementMode": "A String", # Required. The App Check enforcement mode for this resource. This will override the EnforcementMode setting on the parent service. "etag": "A String", # This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. This etag is strongly validated as defined by RFC 7232. "name": "A String", # Required. Identifier. The relative name of the resource policy object, in the format: ``` projects/{project_number}/services/{service_id}/resourcePolicies/{resource_policy_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `oauth2.googleapis.com` (Google Identity for iOS) `resource_policy_id` is a system-generated UID. - "targetResource": "A String", # Required. Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. + "targetResource": "A String", # Required. 
Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created. "updateTime": "A String", # Output only. Timestamp when this resource policy configuration object was most recently updated. } diff --git a/docs/dyn/firebaseappdistribution_v1alpha.projects.apps.releases.tests.html b/docs/dyn/firebaseappdistribution_v1alpha.projects.apps.releases.tests.html index 53e880de4eb..afd0b366f03 100644 --- a/docs/dyn/firebaseappdistribution_v1alpha.projects.apps.releases.tests.html +++ b/docs/dyn/firebaseappdistribution_v1alpha.projects.apps.releases.tests.html @@ -74,6 +74,9 @@

Firebase App Distribution API . projects . apps . releases . tests

Instance Methods

+

+ cancel(name, x__xgafv=None)

+

Abort automated test run on release.

close()

Close httplib2 connections.

@@ -90,6 +93,24 @@

Instance Methods

list_next()

Retrieves the next page of results.

Method Details

+
+ cancel(name, x__xgafv=None) +
Abort automated test run on release.
+
+Args:
+  name: string, Required. The name of the release test resource. Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}/tests/{test_id}` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The (empty) response message for `CancelReleaseTest`.
+}
+
+
close()
Close httplib2 connections.
@@ -217,9 +238,10 @@

Method Details

"username": "A String", # Optional. Username for automated tests }, "name": "A String", # The name of the release test resource. Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}/tests/{test_id}` + "testState": "A String", # Output only. The state of the release test. } - releaseTestId: string, Optional. The ID to use for the test, which will become the final component of the tests's resource name. This value should be 4-63 characters, and valid characters are /a-z-/. If it is not provided one will be automatically generated. + releaseTestId: string, Optional. The ID to use for the test, which will become the final component of the test's resource name. This value should be 4-63 characters, and valid characters are /a-z-/. If it is not provided one will be automatically generated. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -341,6 +363,7 @@

Method Details

"username": "A String", # Optional. Username for automated tests }, "name": "A String", # The name of the release test resource. Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}/tests/{test_id}` + "testState": "A String", # Output only. The state of the release test. }
@@ -471,6 +494,7 @@

Method Details

"username": "A String", # Optional. Username for automated tests }, "name": "A String", # The name of the release test resource. Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}/tests/{test_id}` + "testState": "A String", # Output only. The state of the release test. } @@ -606,6 +630,7 @@

Method Details

"username": "A String", # Optional. Username for automated tests }, "name": "A String", # The name of the release test resource. Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}/tests/{test_id}` + "testState": "A String", # Output only. The state of the release test. }, ], } diff --git a/docs/dyn/firebaseml_v2beta.projects.locations.publishers.models.html b/docs/dyn/firebaseml_v2beta.projects.locations.publishers.models.html index 3455f518c47..168ad2ad28e 100644 --- a/docs/dyn/firebaseml_v2beta.projects.locations.publishers.models.html +++ b/docs/dyn/firebaseml_v2beta.projects.locations.publishers.models.html @@ -137,6 +137,7 @@

Method Details

}, ], "generationConfig": { # Generation config. # Optional. Generation config that the model will use to generate the response. + "audioTimestamp": True or False, # Optional. If enabled, audio timestamp will be included in the request to the model. "candidateCount": 42, # Optional. Number of candidates to generate. "frequencyPenalty": 3.14, # Optional. Frequency penalties. "logprobs": 42, # Optional. Logit probabilities. @@ -150,7 +151,7 @@

Method Details

], "default": "", # Optional. Default value of the data. "description": "A String", # Optional. The description of the data. - "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "enum": [ # Optional. Possible values of the element of primitive type with enum format. Examples: 1. We can define direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} 2. We can define apartment number as : {type:INTEGER, format:enum, enum:["101", "201", "301"]} "A String", ], "example": "", # Optional. Example of the object. Will only populated when the object is the root. @@ -242,7 +243,7 @@

Method Details

], "default": "", # Optional. Default value of the data. "description": "A String", # Optional. The description of the data. - "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "enum": [ # Optional. Possible values of the element of primitive type with enum format. Examples: 1. We can define direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} 2. We can define apartment number as : {type:INTEGER, format:enum, enum:["101", "201", "301"]} "A String", ], "example": "", # Optional. Example of the object. Will only populated when the object is the root. @@ -276,7 +277,7 @@

Method Details

], "default": "", # Optional. Default value of the data. "description": "A String", # Optional. The description of the data. - "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "enum": [ # Optional. Possible values of the element of primitive type with enum format. Examples: 1. We can define direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} 2. We can define apartment number as : {type:INTEGER, format:enum, enum:["101", "201", "301"]} "A String", ], "example": "", # Optional. Example of the object. Will only populated when the object is the root. @@ -397,6 +398,7 @@

Method Details

}, ], "generationConfig": { # Generation config. # Optional. Generation config. + "audioTimestamp": True or False, # Optional. If enabled, audio timestamp will be included in the request to the model. "candidateCount": 42, # Optional. Number of candidates to generate. "frequencyPenalty": 3.14, # Optional. Frequency penalties. "logprobs": 42, # Optional. Logit probabilities. @@ -410,7 +412,7 @@

Method Details

], "default": "", # Optional. Default value of the data. "description": "A String", # Optional. The description of the data. - "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "enum": [ # Optional. Possible values of the element of primitive type with enum format. Examples: 1. We can define direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} 2. We can define apartment number as : {type:INTEGER, format:enum, enum:["101", "201", "301"]} "A String", ], "example": "", # Optional. Example of the object. Will only populated when the object is the root. @@ -516,7 +518,7 @@

Method Details

], "default": "", # Optional. Default value of the data. "description": "A String", # Optional. The description of the data. - "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "enum": [ # Optional. Possible values of the element of primitive type with enum format. Examples: 1. We can define direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} 2. We can define apartment number as : {type:INTEGER, format:enum, enum:["101", "201", "301"]} "A String", ], "example": "", # Optional. Example of the object. Will only populated when the object is the root. @@ -550,7 +552,7 @@

Method Details

], "default": "", # Optional. Default value of the data. "description": "A String", # Optional. The description of the data. - "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "enum": [ # Optional. Possible values of the element of primitive type with enum format. Examples: 1. We can define direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} 2. We can define apartment number as : {type:INTEGER, format:enum, enum:["101", "201", "301"]} "A String", ], "example": "", # Optional. Example of the object. Will only populated when the object is the root. @@ -703,7 +705,7 @@

Method Details

}, ], "retrievalMetadata": { # Metadata related to retrieval in the grounding flow. # Optional. Output only. Retrieval metadata. - "googleSearchDynamicRetrievalScore": 3.14, # Optional. Score indicating how likely information from google search could help answer the prompt. The score is in the range [0, 1], where 0 is the least likely and 1 is the most likely. This score is only populated when google search grounding and dynamic retrieval is enabled. It will be compared to the threshold to determine whether to trigger google search. + "googleSearchDynamicRetrievalScore": 3.14, # Optional. Score indicating how likely information from Google Search could help answer the prompt. The score is in the range `[0, 1]`, where 0 is the least likely and 1 is the most likely. This score is only populated when Google Search grounding and dynamic retrieval is enabled. It will be compared to the threshold to determine whether to trigger Google Search. }, "retrievalQueries": [ # Optional. Queries executed by the retrieval tools. "A String", @@ -819,6 +821,7 @@

Method Details

}, ], "generationConfig": { # Generation config. # Optional. Generation config. + "audioTimestamp": True or False, # Optional. If enabled, audio timestamp will be included in the request to the model. "candidateCount": 42, # Optional. Number of candidates to generate. "frequencyPenalty": 3.14, # Optional. Frequency penalties. "logprobs": 42, # Optional. Logit probabilities. @@ -832,7 +835,7 @@

Method Details

], "default": "", # Optional. Default value of the data. "description": "A String", # Optional. The description of the data. - "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "enum": [ # Optional. Possible values of the element of primitive type with enum format. Examples: 1. We can define direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} 2. We can define apartment number as : {type:INTEGER, format:enum, enum:["101", "201", "301"]} "A String", ], "example": "", # Optional. Example of the object. Will only populated when the object is the root. @@ -938,7 +941,7 @@

Method Details

], "default": "", # Optional. Default value of the data. "description": "A String", # Optional. The description of the data. - "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "enum": [ # Optional. Possible values of the element of primitive type with enum format. Examples: 1. We can define direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} 2. We can define apartment number as : {type:INTEGER, format:enum, enum:["101", "201", "301"]} "A String", ], "example": "", # Optional. Example of the object. Will only populated when the object is the root. @@ -972,7 +975,7 @@

Method Details

], "default": "", # Optional. Default value of the data. "description": "A String", # Optional. The description of the data. - "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "enum": [ # Optional. Possible values of the element of primitive type with enum format. Examples: 1. We can define direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} 2. We can define apartment number as : {type:INTEGER, format:enum, enum:["101", "201", "301"]} "A String", ], "example": "", # Optional. Example of the object. Will only populated when the object is the root. @@ -1125,7 +1128,7 @@

Method Details

}, ], "retrievalMetadata": { # Metadata related to retrieval in the grounding flow. # Optional. Output only. Retrieval metadata. - "googleSearchDynamicRetrievalScore": 3.14, # Optional. Score indicating how likely information from google search could help answer the prompt. The score is in the range [0, 1], where 0 is the least likely and 1 is the most likely. This score is only populated when google search grounding and dynamic retrieval is enabled. It will be compared to the threshold to determine whether to trigger google search. + "googleSearchDynamicRetrievalScore": 3.14, # Optional. Score indicating how likely information from Google Search could help answer the prompt. The score is in the range `[0, 1]`, where 0 is the least likely and 1 is the most likely. This score is only populated when Google Search grounding and dynamic retrieval is enabled. It will be compared to the threshold to determine whether to trigger Google Search. }, "retrievalQueries": [ # Optional. Queries executed by the retrieval tools. "A String", diff --git a/docs/dyn/index.md b/docs/dyn/index.md index 5c4e9221d34..4099288b29c 100644 --- a/docs/dyn/index.md +++ b/docs/dyn/index.md @@ -41,10 +41,6 @@ * [v2](http://googleapis.github.io/google-api-python-client/docs/dyn/adsense_v2.html) -## adsensehost -* [v4.1](http://googleapis.github.io/google-api-python-client/docs/dyn/adsensehost_v4_1.html) - - ## adsenseplatform * [v1](http://googleapis.github.io/google-api-python-client/docs/dyn/adsenseplatform_v1.html) * [v1alpha](http://googleapis.github.io/google-api-python-client/docs/dyn/adsenseplatform_v1alpha.html) diff --git a/docs/dyn/language_v2.documents.html b/docs/dyn/language_v2.documents.html index e4af33970c9..55fd515202e 100644 --- a/docs/dyn/language_v2.documents.html +++ b/docs/dyn/language_v2.documents.html @@ -237,6 +237,7 @@

Method Details

{ # Represents a category returned from the text classifier. "confidence": 3.14, # The classifier's confidence of the category. Number represents how certain the classifier is that this category represents the given text. "name": "A String", # The name of the category representing the document. + "severity": 3.14, # Optional. The classifier's severity of the category. This is only present when the ModerateTextRequest.ModelVersion is set to MODEL_VERSION_2, and the corresponding category has a severity score. }, ], "documentSentiment": { # Represents the feeling associated with the entire text or entities in the text. # The overall sentiment for the document. Populated if the user enables AnnotateTextRequest.Features.extract_document_sentiment. @@ -276,6 +277,7 @@

Method Details

{ # Represents a category returned from the text classifier. "confidence": 3.14, # The classifier's confidence of the category. Number represents how certain the classifier is that this category represents the given text. "name": "A String", # The name of the category representing the document. + "severity": 3.14, # Optional. The classifier's severity of the category. This is only present when the ModerateTextRequest.ModelVersion is set to MODEL_VERSION_2, and the corresponding category has a severity score. }, ], "sentences": [ # Sentences in the input document. Populated if the user enables AnnotateTextRequest.Features.extract_document_sentiment. @@ -323,6 +325,7 @@

Method Details

{ # Represents a category returned from the text classifier. "confidence": 3.14, # The classifier's confidence of the category. Number represents how certain the classifier is that this category represents the given text. "name": "A String", # The name of the category representing the document. + "severity": 3.14, # Optional. The classifier's severity of the category. This is only present when the ModerateTextRequest.ModelVersion is set to MODEL_VERSION_2, and the corresponding category has a severity score. }, ], "languageCode": "A String", # The language of the text, which will be the same as the language specified in the request or, if not specified, the automatically-detected language. See Document.language field for more details. @@ -350,6 +353,7 @@

Method Details

"languageCode": "A String", # Optional. The language of the document (if not specified, the language is automatically detected). Both ISO and BCP-47 language codes are accepted. [Language Support](https://cloud.google.com/natural-language/docs/languages) lists currently supported languages for each API method. If the language (either specified by the caller or automatically detected) is not supported by the called API method, an `INVALID_ARGUMENT` error is returned. "type": "A String", # Required. If the type is not set or is `TYPE_UNSPECIFIED`, returns an `INVALID_ARGUMENT` error. }, + "modelVersion": "A String", # Optional. The model version to use for ModerateText. } x__xgafv: string, V1 error format. @@ -367,6 +371,7 @@

Method Details

{ # Represents a category returned from the text classifier. "confidence": 3.14, # The classifier's confidence of the category. Number represents how certain the classifier is that this category represents the given text. "name": "A String", # The name of the category representing the document. + "severity": 3.14, # Optional. The classifier's severity of the category. This is only present when the ModerateTextRequest.ModelVersion is set to MODEL_VERSION_2, and the corresponding category has a severity score. }, ], } diff --git a/docs/dyn/merchantapi_accounts_v1beta.accounts.html b/docs/dyn/merchantapi_accounts_v1beta.accounts.html index 11c5cb6bc2c..c088f6fdb9a 100644 --- a/docs/dyn/merchantapi_accounts_v1beta.accounts.html +++ b/docs/dyn/merchantapi_accounts_v1beta.accounts.html @@ -141,14 +141,14 @@

Instance Methods

createAndConfigure(body=None, x__xgafv=None)

Creates a standalone Merchant Center account with additional configuration. Adds the user that makes the request as an admin for the new account.

- delete(name, x__xgafv=None)

-

Deletes the specified account regardless of its type: standalone, MCA or sub-account. Deleting an MCA leads to the deletion of all of its sub-accounts. Executing this method requires admin access.

+ delete(name, force=None, x__xgafv=None)

+

Deletes the specified account regardless of its type: standalone, MCA or sub-account. Deleting an MCA leads to the deletion of all of its sub-accounts. Executing this method requires admin access. The deletion succeeds only if the account does not provide services to any other account and has no processed offers. You can use the `force` parameter to override this.

get(name, x__xgafv=None)

Retrieves an account from your Merchant Center account. After inserting, updating, or deleting an account, it may take several minutes before changes take effect.

list(filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists accounts accessible to the calling user and matching the constraints of the request such as page size or filters. This is not just listing the sub-accounts of an MCA, but all accounts the calling user has access to including other MCAs, linked accounts, standalone accounts and so on.

+

Lists accounts accessible to the calling user and matching the constraints of the request such as page size or filters. This is not just listing the sub-accounts of an MCA, but all accounts the calling user has access to including other MCAs, linked accounts, standalone accounts and so on. If no filter is provided, then it returns accounts the user is directly added to.

listSubaccounts(provider, pageSize=None, pageToken=None, x__xgafv=None)

List all sub-accounts for a given multi client account. This is a convenience wrapper for the more powerful `ListAccounts` method. This method will produce the same results as calling `ListsAccounts` with the following filter: `relationship(providerId={parent} AND service(type="ACCOUNT_AGGREGATION"))`

@@ -176,10 +176,6 @@

Method Details

The object takes the form of: { # Request message for the `CreateAndConfigureAccount` method. - "acceptTermsOfService": { # Reference to a Terms of Service resource. # Optional. The Terms of Service (ToS) to be accepted immediately upon account creation. - "name": "A String", # Required. The resource name of the terms of service version in the format `termsOfService/{version}`. To retrieve the latest version, use the [termsOfService.retrieveLatest](/merchant/api/reference/rest/accounts_v1beta/termsOfService/retrieveLatest) method. - "regionCode": "A String", # Required. Region code as defined by [CLDR](https://cldr.unicode.org/). This is either a country when the ToS applies specifically to that country or `001` when it applies globally. - }, "account": { # An account. # Required. The account to be created. "accountId": "A String", # Output only. The ID of the account. "accountName": "A String", # Required. A human-readable name of the account. See [store name](https://support.google.com/merchants/answer/160556) and [business name](https://support.google.com/merchants/answer/12159159) for more information. @@ -237,11 +233,12 @@

Method Details

- delete(name, x__xgafv=None) -
Deletes the specified account regardless of its type: standalone, MCA or sub-account. Deleting an MCA leads to the deletion of all of its sub-accounts. Executing this method requires admin access.
+    delete(name, force=None, x__xgafv=None)
+  
Deletes the specified account regardless of its type: standalone, MCA or sub-account. Deleting an MCA leads to the deletion of all of its sub-accounts. Executing this method requires admin access. The deletion succeeds only if the account does not provide services to any other account and has no processed offers. You can use the `force` parameter to override this.
 
 Args:
   name: string, Required. The name of the account to delete. Format: `accounts/{account}` (required)
+  force: boolean, Optional. If set to `true`, the account is deleted even if it provides services to other accounts or has processed offers.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -284,7 +281,7 @@ 

Method Details

list(filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists accounts accessible to the calling user and matching the constraints of the request such as page size or filters. This is not just listing the sub-accounts of an MCA, but all accounts the calling user has access to including other MCAs, linked accounts, standalone accounts and so on.
+  
Lists accounts accessible to the calling user and matching the constraints of the request such as page size or filters. This is not just listing the sub-accounts of an MCA, but all accounts the calling user has access to including other MCAs, linked accounts, standalone accounts and so on. If no filter is provided, then it returns accounts the user is directly added to.
 
 Args:
   filter: string, Optional. Returns only accounts that match the [filter](/merchant/api/guides/accounts/filter). For more details, see the [filter syntax reference](/merchant/api/guides/accounts/filter-syntax).
diff --git a/docs/dyn/merchantapi_conversions_v1beta.accounts.conversionSources.html b/docs/dyn/merchantapi_conversions_v1beta.accounts.conversionSources.html
index f0e4a04fc7e..9885715f723 100644
--- a/docs/dyn/merchantapi_conversions_v1beta.accounts.conversionSources.html
+++ b/docs/dyn/merchantapi_conversions_v1beta.accounts.conversionSources.html
@@ -217,7 +217,7 @@ 

Method Details

Fetches a conversion source.
 
 Args:
-  name: string, Required. The name of the conversion source to be fetched. Format: accounts/{account}/conversionsources/{conversion_source} (required)
+  name: string, Required. The name of the conversion source to be fetched. Format: accounts/{account}/conversionSources/{conversion_source} (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
diff --git a/docs/dyn/merchantapi_notifications_v1beta.accounts.notificationsubscriptions.html b/docs/dyn/merchantapi_notifications_v1beta.accounts.notificationsubscriptions.html
index b5ad9d03aaf..f6dec6acd39 100644
--- a/docs/dyn/merchantapi_notifications_v1beta.accounts.notificationsubscriptions.html
+++ b/docs/dyn/merchantapi_notifications_v1beta.accounts.notificationsubscriptions.html
@@ -79,7 +79,7 @@ 

Instance Methods

Close httplib2 connections.

create(parent, body=None, x__xgafv=None)

-

Creates a notification subscription for a merchant. We will allow the following types of notification subscriptions to exist together (per merchant as a subscriber per event type): 1. Subscription for all managed accounts + subscription for self 2. Multiple "partial" subscriptions for managed accounts + subscription for self we will not allow (per merchant as a subscriber per event type): 1. multiple self subscriptions. 2. multiple "all managed accounts" subscriptions. 3. all and partial subscriptions at the same time. 4. multiple partial subscriptions for the same target account

+

Creates a notification subscription for a business. For standalone or subaccounts accounts, the business can create a subscription for self. For MCAs, the business can create a subscription for all managed accounts or for a specific subaccount. We will allow the following types of notification subscriptions to exist together (per business as a subscriber per event type): 1. Subscription for all managed accounts + subscription for self. 2. Multiple "partial" subscriptions for managed accounts + subscription for self. we will not allow (per business as a subscriber per event type): 1. Multiple self subscriptions. 2. Multiple "all managed accounts" subscriptions. 3. "All managed accounts" subscription and partial subscriptions at the same time. 4. Multiple partial subscriptions for the same target account.

delete(name, x__xgafv=None)

Deletes a notification subscription for a merchant.

@@ -103,7 +103,7 @@

Method Details

create(parent, body=None, x__xgafv=None) -
Creates a notification subscription for a merchant. We will allow the following types of notification subscriptions to exist together (per merchant as a subscriber per event type): 1. Subscription for all managed accounts + subscription for self 2. Multiple "partial" subscriptions for managed accounts + subscription for self we will not allow (per merchant as a subscriber per event type): 1. multiple self subscriptions. 2. multiple "all managed accounts" subscriptions. 3. all and partial subscriptions at the same time. 4. multiple partial subscriptions for the same target account
+  
Creates a notification subscription for a business. For standalone or subaccounts accounts, the business can create a subscription for self. For MCAs, the business can create a subscription for all managed accounts or for a specific subaccount. We will allow the following types of notification subscriptions to exist together (per business as a subscriber per event type): 1. Subscription for all managed accounts + subscription for self. 2. Multiple "partial" subscriptions for managed accounts + subscription for self. we will not allow (per business as a subscriber per event type): 1. Multiple self subscriptions. 2. Multiple "all managed accounts" subscriptions. 3. "All managed accounts" subscription and partial subscriptions at the same time. 4. Multiple partial subscriptions for the same target account.
 
 Args:
   parent: string, Required. The merchant account that owns the new notification subscription. Format: `accounts/{account}` (required)
diff --git a/docs/dyn/merchantapi_products_v1beta.accounts.productInputs.html b/docs/dyn/merchantapi_products_v1beta.accounts.productInputs.html
index 28282fe1f4f..6ecb9be5ad9 100644
--- a/docs/dyn/merchantapi_products_v1beta.accounts.productInputs.html
+++ b/docs/dyn/merchantapi_products_v1beta.accounts.productInputs.html
@@ -403,7 +403,7 @@ 

Method Details

}, ], "feedLabel": "A String", # Required. Immutable. The [feed label](https://developers.google.com/shopping-content/guides/products/feed-labels) for the product. - "name": "A String", # Identifier. The name of the product input. Format: `"{productinput.name=accounts/{account}/productInputs/{productinput}}"` + "name": "A String", # Identifier. The name of the product input. Format: `"{productinput.name=accounts/{account}/productInputs/{productinput}}"` where the last section `productinput` consists of 4 parts: channel~content_language~feed_label~offer_id example for product input name is "accounts/123/productInputs/online~en~US~sku123" "offerId": "A String", # Required. Immutable. Your unique identifier for the product. This is the same for the product input and processed product. Leading and trailing whitespaces are stripped and multiple whitespaces are replaced by a single whitespace upon submission. See the [products data specification](https://support.google.com/merchants/answer/188494#id) for details. "product": "A String", # Output only. The name of the processed product. Format: `"{product.name=accounts/{account}/products/{product}}"` "versionNumber": "A String", # Optional. Represents the existing version (freshness) of the product, which can be used to preserve the right order when multiple updates are done at the same time. If set, the insertion is prevented when version number is lower than the current version number of the existing product. Re-insertion (for example, product refresh after 30 days) can be performed with the current `version_number`. Only supported for insertions into primary data sources. If the operation is prevented, the aborted exception will be thrown. @@ -704,7 +704,7 @@

Method Details

}, ], "feedLabel": "A String", # Required. Immutable. The [feed label](https://developers.google.com/shopping-content/guides/products/feed-labels) for the product. - "name": "A String", # Identifier. The name of the product input. Format: `"{productinput.name=accounts/{account}/productInputs/{productinput}}"` + "name": "A String", # Identifier. The name of the product input. Format: `"{productinput.name=accounts/{account}/productInputs/{productinput}}"` where the last section `productinput` consists of 4 parts: channel~content_language~feed_label~offer_id example for product input name is "accounts/123/productInputs/online~en~US~sku123" "offerId": "A String", # Required. Immutable. Your unique identifier for the product. This is the same for the product input and processed product. Leading and trailing whitespaces are stripped and multiple whitespaces are replaced by a single whitespace upon submission. See the [products data specification](https://support.google.com/merchants/answer/188494#id) for details. "product": "A String", # Output only. The name of the processed product. Format: `"{product.name=accounts/{account}/products/{product}}"` "versionNumber": "A String", # Optional. Represents the existing version (freshness) of the product, which can be used to preserve the right order when multiple updates are done at the same time. If set, the insertion is prevented when version number is lower than the current version number of the existing product. Re-insertion (for example, product refresh after 30 days) can be performed with the current `version_number`. Only supported for insertions into primary data sources. If the operation is prevented, the aborted exception will be thrown. 
diff --git a/docs/dyn/merchantapi_products_v1beta.accounts.products.html b/docs/dyn/merchantapi_products_v1beta.accounts.products.html index d0f02b783dc..7c3e30e2e95 100644 --- a/docs/dyn/merchantapi_products_v1beta.accounts.products.html +++ b/docs/dyn/merchantapi_products_v1beta.accounts.products.html @@ -393,7 +393,7 @@

Method Details

], "dataSource": "A String", # Output only. The primary data source of the product. "feedLabel": "A String", # Output only. The feed label for the product. - "name": "A String", # The name of the product. Format: `"{product.name=accounts/{account}/products/{product}}"` + "name": "A String", # The name of the product. Format: `"{product.name=accounts/{account}/products/{product}}"` where the last section `product` consists of 4 parts: channel~content_language~feed_label~offer_id example for product name is "accounts/123/products/online~en~US~sku123" "offerId": "A String", # Output only. Your unique identifier for the product. This is the same for the product input and processed product. Leading and trailing whitespaces are stripped and multiple whitespaces are replaced by a single whitespace upon submission. See the [product data specification](https://support.google.com/merchants/answer/188494#id) for details. "productStatus": { # The status of a product, data validation issues, that is, information about a product computed asynchronously. # Output only. The status of a product, data validation issues, that is, information about a product computed asynchronously. "creationDate": "A String", # Date on which the item has been created, in [ISO 8601](http://en.wikipedia.org/wiki/ISO_8601) format. @@ -439,7 +439,7 @@

Method Details

Args: parent: string, Required. The account to list processed products for. Format: accounts/{account} (required) - pageSize: integer, The maximum number of products to return. The service may return fewer than this value. The maximum value is 1000; values above 1000 will be coerced to 1000. If unspecified, the maximum number of products will be returned. + pageSize: integer, The maximum number of products to return. The service may return fewer than this value. The maximum value is 250; values above 250 will be coerced to 250. If unspecified, the maximum number of products will be returned. pageToken: string, A page token, received from a previous `ListProducts` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListProducts` must match the call that provided the page token. x__xgafv: string, V1 error format. Allowed values @@ -739,7 +739,7 @@

Method Details

], "dataSource": "A String", # Output only. The primary data source of the product. "feedLabel": "A String", # Output only. The feed label for the product. - "name": "A String", # The name of the product. Format: `"{product.name=accounts/{account}/products/{product}}"` + "name": "A String", # The name of the product. Format: `"{product.name=accounts/{account}/products/{product}}"` where the last section `product` consists of 4 parts: channel~content_language~feed_label~offer_id example for product name is "accounts/123/products/online~en~US~sku123" "offerId": "A String", # Output only. Your unique identifier for the product. This is the same for the product input and processed product. Leading and trailing whitespaces are stripped and multiple whitespaces are replaced by a single whitespace upon submission. See the [product data specification](https://support.google.com/merchants/answer/188494#id) for details. "productStatus": { # The status of a product, data validation issues, that is, information about a product computed asynchronously. # Output only. The status of a product, data validation issues, that is, information about a product computed asynchronously. "creationDate": "A String", # Date on which the item has been created, in [ISO 8601](http://en.wikipedia.org/wiki/ISO_8601) format. diff --git a/docs/dyn/migrationcenter_v1.projects.locations.assets.html b/docs/dyn/migrationcenter_v1.projects.locations.assets.html index 4011ea2b394..c135edd0fd4 100644 --- a/docs/dyn/migrationcenter_v1.projects.locations.assets.html +++ b/docs/dyn/migrationcenter_v1.projects.locations.assets.html @@ -273,7 +273,7 @@

Method Details

"cpuArchitecture": "A String", # CPU architecture, e.g., "x64-based PC", "x86_64", "i686" etc. "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use MachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmwareType": "A String", # Firmware type. "hyperthreading": "A String", # CPU hyper-threading support. "vendor": "A String", # Hardware vendor. @@ -614,7 +614,7 @@

Method Details

"cpuArchitecture": "A String", # CPU architecture, e.g., "x64-based PC", "x86_64", "i686" etc. "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use MachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmwareType": "A String", # Firmware type. "hyperthreading": "A String", # CPU hyper-threading support. "vendor": "A String", # Hardware vendor. @@ -986,7 +986,7 @@

Method Details

"cpuArchitecture": "A String", # CPU architecture, e.g., "x64-based PC", "x86_64", "i686" etc. "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use MachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmwareType": "A String", # Firmware type. "hyperthreading": "A String", # CPU hyper-threading support. "vendor": "A String", # Hardware vendor. @@ -1338,7 +1338,7 @@

Method Details

"cpuArchitecture": "A String", # CPU architecture, e.g., "x64-based PC", "x86_64", "i686" etc. "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use MachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmwareType": "A String", # Firmware type. "hyperthreading": "A String", # CPU hyper-threading support. "vendor": "A String", # Hardware vendor. @@ -1694,7 +1694,7 @@

Method Details

"cpuArchitecture": "A String", # CPU architecture, e.g., "x64-based PC", "x86_64", "i686" etc. "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use MachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmwareType": "A String", # Firmware type. "hyperthreading": "A String", # CPU hyper-threading support. "vendor": "A String", # Hardware vendor. @@ -2030,7 +2030,7 @@

Method Details

"cpuArchitecture": "A String", # CPU architecture, e.g., "x64-based PC", "x86_64", "i686" etc. "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use MachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmwareType": "A String", # Firmware type. "hyperthreading": "A String", # CPU hyper-threading support. "vendor": "A String", # Hardware vendor. @@ -2331,7 +2331,7 @@

Method Details

"cpuArchitecture": "A String", # CPU architecture, e.g., "x64-based PC", "x86_64", "i686" etc. "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use MachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmwareType": "A String", # Firmware type. "hyperthreading": "A String", # CPU hyper-threading support. "vendor": "A String", # Hardware vendor. diff --git a/docs/dyn/migrationcenter_v1.projects.locations.reportConfigs.reports.html b/docs/dyn/migrationcenter_v1.projects.locations.reportConfigs.reports.html index 6fc9b4bf945..7ce28792b32 100644 --- a/docs/dyn/migrationcenter_v1.projects.locations.reportConfigs.reports.html +++ b/docs/dyn/migrationcenter_v1.projects.locations.reportConfigs.reports.html @@ -115,7 +115,7 @@

Method Details

"state": "A String", # Report creation state. "summary": { # Describes the Summary view of a Report, which contains aggregated values for all the groups and preference sets included in this Report. # Output only. Summary view of the Report. "allAssetsStats": { # Aggregate statistics for a collection of assets. # Aggregate statistics for all the assets across all the groups. - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -166,7 +166,7 @@

Method Details

"groupFindings": [ # Findings for each Group included in this report. { # Summary Findings for a specific Group. "assetAggregateStats": { # Aggregate statistics for a collection of assets. # Summary statistics for all the assets in this group. - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -439,7 +439,7 @@

Method Details

"state": "A String", # Report creation state. "summary": { # Describes the Summary view of a Report, which contains aggregated values for all the groups and preference sets included in this Report. # Output only. Summary view of the Report. "allAssetsStats": { # Aggregate statistics for a collection of assets. # Aggregate statistics for all the assets across all the groups. - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -490,7 +490,7 @@

Method Details

"groupFindings": [ # Findings for each Group included in this report. { # Summary Findings for a specific Group. "assetAggregateStats": { # Aggregate statistics for a collection of assets. # Summary statistics for all the assets in this group. - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -704,7 +704,7 @@

Method Details

"state": "A String", # Report creation state. "summary": { # Describes the Summary view of a Report, which contains aggregated values for all the groups and preference sets included in this Report. # Output only. Summary view of the Report. "allAssetsStats": { # Aggregate statistics for a collection of assets. # Aggregate statistics for all the assets across all the groups. - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -755,7 +755,7 @@

Method Details

"groupFindings": [ # Findings for each Group included in this report. { # Summary Findings for a specific Group. "assetAggregateStats": { # Aggregate statistics for a collection of assets. # Summary statistics for all the assets in this group. - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. diff --git a/docs/dyn/migrationcenter_v1.projects.locations.sources.errorFrames.html b/docs/dyn/migrationcenter_v1.projects.locations.sources.errorFrames.html index 57e3d8e027c..3c357daa560 100644 --- a/docs/dyn/migrationcenter_v1.projects.locations.sources.errorFrames.html +++ b/docs/dyn/migrationcenter_v1.projects.locations.sources.errorFrames.html @@ -139,7 +139,7 @@

Method Details

"cpuArchitecture": "A String", # CPU architecture, e.g., "x64-based PC", "x86_64", "i686" etc. "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use MachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmwareType": "A String", # Firmware type. "hyperthreading": "A String", # CPU hyper-threading support. "vendor": "A String", # Hardware vendor. @@ -428,7 +428,7 @@

Method Details

"cpuArchitecture": "A String", # CPU architecture, e.g., "x64-based PC", "x86_64", "i686" etc. "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use MachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmwareType": "A String", # Firmware type. "hyperthreading": "A String", # CPU hyper-threading support. "vendor": "A String", # Hardware vendor. diff --git a/docs/dyn/migrationcenter_v1alpha1.projects.locations.assets.html b/docs/dyn/migrationcenter_v1alpha1.projects.locations.assets.html index 8f2dd979bb2..092eb4d11bb 100644 --- a/docs/dyn/migrationcenter_v1alpha1.projects.locations.assets.html +++ b/docs/dyn/migrationcenter_v1alpha1.projects.locations.assets.html @@ -716,7 +716,7 @@

Method Details

"cpuManufacturer": "A String", # CPU manufacturer, e.g., "Intel", "AMD". "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use VirtualMachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmware": "A String", # Firmware (BIOS/efi). "hyperthreading": "A String", # CPU hyperthreading support. "vendor": "A String", # Hardware vendor. @@ -1301,7 +1301,7 @@

Method Details

"cpuManufacturer": "A String", # CPU manufacturer, e.g., "Intel", "AMD". "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use VirtualMachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmware": "A String", # Firmware (BIOS/efi). "hyperthreading": "A String", # CPU hyperthreading support. "vendor": "A String", # Hardware vendor. @@ -1918,7 +1918,7 @@

Method Details

"cpuManufacturer": "A String", # CPU manufacturer, e.g., "Intel", "AMD". "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use VirtualMachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmware": "A String", # Firmware (BIOS/efi). "hyperthreading": "A String", # CPU hyperthreading support. "vendor": "A String", # Hardware vendor. @@ -2516,7 +2516,7 @@

Method Details

"cpuManufacturer": "A String", # CPU manufacturer, e.g., "Intel", "AMD". "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use VirtualMachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmware": "A String", # Firmware (BIOS/efi). "hyperthreading": "A String", # CPU hyperthreading support. "vendor": "A String", # Hardware vendor. @@ -3116,7 +3116,7 @@

Method Details

"cpuManufacturer": "A String", # CPU manufacturer, e.g., "Intel", "AMD". "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use VirtualMachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmware": "A String", # Firmware (BIOS/efi). "hyperthreading": "A String", # CPU hyperthreading support. "vendor": "A String", # Hardware vendor. @@ -3696,7 +3696,7 @@

Method Details

"cpuManufacturer": "A String", # CPU manufacturer, e.g., "Intel", "AMD". "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use VirtualMachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmware": "A String", # Firmware (BIOS/efi). "hyperthreading": "A String", # CPU hyperthreading support. "vendor": "A String", # Hardware vendor. @@ -4126,7 +4126,7 @@

Method Details

"cpuManufacturer": "A String", # CPU manufacturer, e.g., "Intel", "AMD". "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use VirtualMachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmware": "A String", # Firmware (BIOS/efi). "hyperthreading": "A String", # CPU hyperthreading support. "vendor": "A String", # Hardware vendor. diff --git a/docs/dyn/migrationcenter_v1alpha1.projects.locations.preferenceSets.html b/docs/dyn/migrationcenter_v1alpha1.projects.locations.preferenceSets.html index f7b49f12194..fe390506781 100644 --- a/docs/dyn/migrationcenter_v1alpha1.projects.locations.preferenceSets.html +++ b/docs/dyn/migrationcenter_v1alpha1.projects.locations.preferenceSets.html @@ -119,7 +119,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -133,7 +133,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -145,7 +145,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -347,7 +347,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -361,7 +361,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -373,7 +373,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -515,7 +515,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -529,7 +529,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -541,7 +541,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -691,7 +691,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -705,7 +705,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -717,7 +717,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. diff --git a/docs/dyn/migrationcenter_v1alpha1.projects.locations.reportConfigs.reports.html b/docs/dyn/migrationcenter_v1alpha1.projects.locations.reportConfigs.reports.html index 35d6a50c647..43e9418b604 100644 --- a/docs/dyn/migrationcenter_v1alpha1.projects.locations.reportConfigs.reports.html +++ b/docs/dyn/migrationcenter_v1alpha1.projects.locations.reportConfigs.reports.html @@ -123,7 +123,7 @@

Method Details

}, ], }, - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -204,7 +204,7 @@

Method Details

}, ], }, - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -287,7 +287,7 @@

Method Details

}, ], }, - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -543,7 +543,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -557,7 +557,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -569,7 +569,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -726,7 +726,7 @@

Method Details

}, ], }, - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -906,7 +906,7 @@

Method Details

}, ], }, - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -987,7 +987,7 @@

Method Details

}, ], }, - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -1070,7 +1070,7 @@

Method Details

}, ], }, - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -1326,7 +1326,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -1340,7 +1340,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -1352,7 +1352,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -1509,7 +1509,7 @@

Method Details

}, ], }, - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -1630,7 +1630,7 @@

Method Details

}, ], }, - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -1711,7 +1711,7 @@

Method Details

}, ], }, - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -1794,7 +1794,7 @@

Method Details

}, ], }, - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. @@ -2050,7 +2050,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -2064,7 +2064,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -2076,7 +2076,7 @@

Method Details

"backupMode": "A String", # Optional. Automated backup mode. }, "commitmentPlan": "A String", # Optional. Commitment plan to consider when calculating costs. Only regular CUDs (not flexible) are currently available. - "edition": "A String", # Optional. Cloud SQL edition. For SQL Server, only Enterprise is available. + "edition": "A String", # Optional. Preferred Cloud SQL edition. "persistentDiskType": "A String", # Optional. Persistent disk type to use. If unspecified, a disk type is recommended based on available usage data. For SQL Server, only SSD is available. For MySQL and PostgreSQL, only STANDARD (HDD) and SSD types are available. "sizingOptimizationStrategy": "A String", # Optional. Sizing optimization strategy of the database. Currently supported for Cloud SQL are just two values: SIZING_OPTIMIZATION_STRATEGY_MODERATE and SIZING_OPTIMIZATION_STRATEGY_SAME_AS_SOURCE. SIZING_OPTIMIZATION_STRATEGY_UNSPECIFIED will behave like SIZING_OPTIMIZATION_STRATEGY_MODERATE. "zoneAvailability": "A String", # Optional. Preferred zone availability. @@ -2233,7 +2233,7 @@

Method Details

}, ], }, - "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of CPU core counts. + "coreCountHistogram": { # A Histogram Chart shows a distribution of values into buckets, showing a count of values which fall into a bucket. # Histogram showing a distribution of logical CPU core counts. "buckets": [ # Buckets in the histogram. There will be `n+1` buckets matching `n` lower bounds in the request. The first bucket will be from -infinity to the first bound. Subsequent buckets will be between one bound and the next. The final bucket will be from the final bound to infinity. { # A histogram bucket with a lower and upper bound, and a count of items with a field value between those bounds. The lower bound is inclusive and the upper bound is exclusive. Lower bound may be -infinity and upper bound may be infinity. "count": "A String", # Count of items in the bucket. diff --git a/docs/dyn/migrationcenter_v1alpha1.projects.locations.sources.errorFrames.html b/docs/dyn/migrationcenter_v1alpha1.projects.locations.sources.errorFrames.html index 82f8d3104ec..743c75f4d73 100644 --- a/docs/dyn/migrationcenter_v1alpha1.projects.locations.sources.errorFrames.html +++ b/docs/dyn/migrationcenter_v1alpha1.projects.locations.sources.errorFrames.html @@ -460,7 +460,7 @@

Method Details

"cpuManufacturer": "A String", # CPU manufacturer, e.g., "Intel", "AMD". "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use VirtualMachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmware": "A String", # Firmware (BIOS/efi). "hyperthreading": "A String", # CPU hyperthreading support. "vendor": "A String", # Hardware vendor. @@ -912,7 +912,7 @@

Method Details

"cpuManufacturer": "A String", # CPU manufacturer, e.g., "Intel", "AMD". "cpuName": "A String", # CPU name, e.g., "Intel Xeon E5-2690", "AMD EPYC 7571" etc. "cpuSocketCount": 42, # Number of processor sockets allocated to the machine. - "cpuThreadCount": 42, # Number of CPU threads allocated to the machine. + "cpuThreadCount": 42, # Deprecated: use VirtualMachineDetails.core_count instead. Number of CPU threads allocated to the machine. "firmware": "A String", # Firmware (BIOS/efi). "hyperthreading": "A String", # CPU hyperthreading support. "vendor": "A String", # Hardware vendor. diff --git a/docs/dyn/monitoring_v1.projects.location.prometheus.api.v1.html b/docs/dyn/monitoring_v1.projects.location.prometheus.api.v1.html index dffb53ce524..fb03f61cc72 100644 --- a/docs/dyn/monitoring_v1.projects.location.prometheus.api.v1.html +++ b/docs/dyn/monitoring_v1.projects.location.prometheus.api.v1.html @@ -154,7 +154,7 @@

Method Details

The object takes the form of: { # QueryInstantRequest holds all parameters of the Prometheus upstream instant query API plus GCM specific parameters. - "query": "A String", # A PromQL query string. Query lanauge documentation: https://prometheus.io/docs/prometheus/latest/querying/basics/. + "query": "A String", # A PromQL query string. Query language documentation: https://prometheus.io/docs/prometheus/latest/querying/basics/. "time": "A String", # The single point in time to evaluate the query for. Either floating point UNIX seconds or RFC3339 formatted timestamp. "timeout": "A String", # An upper bound timeout for the query. Either a Prometheus duration string (https://prometheus.io/docs/prometheus/latest/querying/basics/#time-durations) or floating point seconds. This non-standard encoding must be used for compatibility with the open source API. Clients may still implement timeouts at the connection level while ignoring this field. } @@ -190,7 +190,7 @@

Method Details

{ # QueryExemplarsRequest holds all parameters of the Prometheus upstream API for querying exemplars. "end": "A String", # The end time to evaluate the query for. Either floating point UNIX seconds or RFC3339 formatted timestamp. - "query": "A String", # A PromQL query string. Query lanauge documentation: https://prometheus.io/docs/prometheus/latest/querying/basics/. + "query": "A String", # A PromQL query string. Query language documentation: https://prometheus.io/docs/prometheus/latest/querying/basics/. "start": "A String", # The start time to evaluate the query for. Either floating point UNIX seconds or RFC3339 formatted timestamp. } @@ -225,7 +225,7 @@

Method Details

{ # QueryRangeRequest holds all parameters of the Prometheus upstream range query API plus GCM specific parameters. "end": "A String", # The end time to evaluate the query for. Either floating point UNIX seconds or RFC3339 formatted timestamp. - "query": "A String", # A PromQL query string. Query lanauge documentation: https://prometheus.io/docs/prometheus/latest/querying/basics/. + "query": "A String", # A PromQL query string. Query language documentation: https://prometheus.io/docs/prometheus/latest/querying/basics/. "start": "A String", # The start time to evaluate the query for. Either floating point UNIX seconds or RFC3339 formatted timestamp. "step": "A String", # The resolution of query result. Either a Prometheus duration string (https://prometheus.io/docs/prometheus/latest/querying/basics/#time-durations) or floating point seconds. This non-standard encoding must be used for compatibility with the open source API. Clients may still implement timeouts at the connection level while ignoring this field. "timeout": "A String", # An upper bound timeout for the query. Either a Prometheus duration string (https://prometheus.io/docs/prometheus/latest/querying/basics/#time-durations) or floating point seconds. This non-standard encoding must be used for compatibility with the open source API. Clients may still implement timeouts at the connection level while ignoring this field. diff --git a/docs/dyn/monitoring_v3.projects.alertPolicies.html b/docs/dyn/monitoring_v3.projects.alertPolicies.html index 6375b2167d3..825c0ae7fa2 100644 --- a/docs/dyn/monitoring_v3.projects.alertPolicies.html +++ b/docs/dyn/monitoring_v3.projects.alertPolicies.html @@ -171,6 +171,32 @@

Method Details

"query": "A String", # Required. The PromQL expression to evaluate. Every evaluation cycle this expression is evaluated at the current time, and all resultant time series become pending/firing alerts. This field must not be empty. "ruleGroup": "A String", # Optional. The rule group name of this alert in the corresponding Prometheus configuration file.Some external tools may require this field to be populated correctly in order to refer to the original Prometheus configuration file. The rule group name and the alert name are necessary to update the relevant AlertPolicies in case the definition of the rule group changes in the future.This field is optional. If this field is not empty, then it must contain a valid UTF-8 string. This field may not exceed 2048 Unicode characters in length. }, + "conditionSql": { # A condition that allows alert policies to be defined using GoogleSQL. SQL conditions examine a sliding window of logs using GoogleSQL. Alert policies with SQL conditions may incur additional billing. # A condition that uses SQL to define alerts in Logs Analytics. + "booleanTest": { # A test that uses an alerting result in a boolean column produced by the SQL query. # Test the boolean value in the indicated column. + "column": "A String", # Required. The name of the column containing the boolean value. If the value in a row is NULL, that row is ignored. + }, + "daily": { # Used to schedule the query to run every so many days. # Schedule the query to execute every so many days. + "executionTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and google.protobuf.Timestamp. # Optional. The time of day (in UTC) at which the query should run. If left unspecified, the server picks an arbitrary time of day and runs the query at the same time each day. + "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. 
An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. + "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + }, + "periodicity": 42, # Required. LINT.IfChange The number of days between runs. Must be greater than or equal to 1 day and less than or equal to 31 days. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "hourly": { # Used to schedule the query to run every so many hours. # Schedule the query to execute every so many hours. + "minuteOffset": 42, # Optional. LINT.IfChange The number of minutes after the hour (in UTC) to run the query. Must be between 0 and 59 inclusive. If left unspecified, then an arbitrary offset is used. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + "periodicity": 42, # Required. LINT.IfChange The number of hours between runs. Must be greater than or equal to 1 hour and less than or equal to 48 hours. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "minutes": { # Used to schedule the query to run every so many minutes. # Schedule the query to execute every so many minutes. + "periodicity": 42, # Required. LINT.IfChange Number of minutes between runs. The interval must be between 5 minutes and 1440 minutes. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "query": "A String", # Required. The Log Analytics SQL query to run, as a string. The query must conform to the required shape. Specifically, the query must not try to filter the input by time. A filter will automatically be applied to filter the input so that the query receives all rows received since the last time the query was run.E.g. 
Extract all log entries containing an HTTP request:SELECT timestamp, log_name, severity, http_request, resource, labels FROM my-project.global._Default._AllLogs WHERE http_request IS NOT NULL + "rowCountTest": { # A test that checks if the number of rows in the result set violates some threshold. # Test the row count against a threshold. + "comparison": "A String", # Required. The comparison to apply between the number of rows returned by the query and the threshold. + "threshold": "A String", # Required. The value against which to compare the row count. + }, + }, "conditionThreshold": { # A condition type that compares a collection of time series against a threshold. # A condition that compares a time series against a threshold. "aggregations": [ # Specifies the alignment of data points in individual time series as well as how to combine the retrieved time series together (such as when aggregating multiple streams on each resource to a single stream for each resource or when aggregating streams across all members of a group of resources). Multiple aggregations are applied in the order specified.This field is similar to the one in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). It is advisable to use the ListTimeSeries method when debugging this field. { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). @@ -319,6 +345,32 @@

Method Details

"query": "A String", # Required. The PromQL expression to evaluate. Every evaluation cycle this expression is evaluated at the current time, and all resultant time series become pending/firing alerts. This field must not be empty. "ruleGroup": "A String", # Optional. The rule group name of this alert in the corresponding Prometheus configuration file.Some external tools may require this field to be populated correctly in order to refer to the original Prometheus configuration file. The rule group name and the alert name are necessary to update the relevant AlertPolicies in case the definition of the rule group changes in the future.This field is optional. If this field is not empty, then it must contain a valid UTF-8 string. This field may not exceed 2048 Unicode characters in length. }, + "conditionSql": { # A condition that allows alert policies to be defined using GoogleSQL. SQL conditions examine a sliding window of logs using GoogleSQL. Alert policies with SQL conditions may incur additional billing. # A condition that uses SQL to define alerts in Logs Analytics. + "booleanTest": { # A test that uses an alerting result in a boolean column produced by the SQL query. # Test the boolean value in the indicated column. + "column": "A String", # Required. The name of the column containing the boolean value. If the value in a row is NULL, that row is ignored. + }, + "daily": { # Used to schedule the query to run every so many days. # Schedule the query to execute every so many days. + "executionTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and google.protobuf.Timestamp. # Optional. The time of day (in UTC) at which the query should run. If left unspecified, the server picks an arbitrary time of day and runs the query at the same time each day. + "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. 
An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. + "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + }, + "periodicity": 42, # Required. LINT.IfChange The number of days between runs. Must be greater than or equal to 1 day and less than or equal to 31 days. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "hourly": { # Used to schedule the query to run every so many hours. # Schedule the query to execute every so many hours. + "minuteOffset": 42, # Optional. LINT.IfChange The number of minutes after the hour (in UTC) to run the query. Must be between 0 and 59 inclusive. If left unspecified, then an arbitrary offset is used. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + "periodicity": 42, # Required. LINT.IfChange The number of hours between runs. Must be greater than or equal to 1 hour and less than or equal to 48 hours. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "minutes": { # Used to schedule the query to run every so many minutes. # Schedule the query to execute every so many minutes. + "periodicity": 42, # Required. LINT.IfChange Number of minutes between runs. The interval must be between 5 minutes and 1440 minutes. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "query": "A String", # Required. The Log Analytics SQL query to run, as a string. The query must conform to the required shape. Specifically, the query must not try to filter the input by time. A filter will automatically be applied to filter the input so that the query receives all rows received since the last time the query was run.E.g. 
Extract all log entries containing an HTTP request:SELECT timestamp, log_name, severity, http_request, resource, labels FROM my-project.global._Default._AllLogs WHERE http_request IS NOT NULL + "rowCountTest": { # A test that checks if the number of rows in the result set violates some threshold. # Test the row count against a threshold. + "comparison": "A String", # Required. The comparison to apply between the number of rows returned by the query and the threshold. + "threshold": "A String", # Required. The value against which to compare the row count. + }, + }, "conditionThreshold": { # A condition type that compares a collection of time series against a threshold. # A condition that compares a time series against a threshold. "aggregations": [ # Specifies the alignment of data points in individual time series as well as how to combine the retrieved time series together (such as when aggregating multiple streams on each resource to a single stream for each resource or when aggregating streams across all members of a group of resources). Multiple aggregations are applied in the order specified.This field is similar to the one in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). It is advisable to use the ListTimeSeries method when debugging this field. { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). @@ -492,6 +544,32 @@

Method Details

"query": "A String", # Required. The PromQL expression to evaluate. Every evaluation cycle this expression is evaluated at the current time, and all resultant time series become pending/firing alerts. This field must not be empty. "ruleGroup": "A String", # Optional. The rule group name of this alert in the corresponding Prometheus configuration file.Some external tools may require this field to be populated correctly in order to refer to the original Prometheus configuration file. The rule group name and the alert name are necessary to update the relevant AlertPolicies in case the definition of the rule group changes in the future.This field is optional. If this field is not empty, then it must contain a valid UTF-8 string. This field may not exceed 2048 Unicode characters in length. }, + "conditionSql": { # A condition that allows alert policies to be defined using GoogleSQL. SQL conditions examine a sliding window of logs using GoogleSQL. Alert policies with SQL conditions may incur additional billing. # A condition that uses SQL to define alerts in Logs Analytics. + "booleanTest": { # A test that uses an alerting result in a boolean column produced by the SQL query. # Test the boolean value in the indicated column. + "column": "A String", # Required. The name of the column containing the boolean value. If the value in a row is NULL, that row is ignored. + }, + "daily": { # Used to schedule the query to run every so many days. # Schedule the query to execute every so many days. + "executionTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and google.protobuf.Timestamp. # Optional. The time of day (in UTC) at which the query should run. If left unspecified, the server picks an arbitrary time of day and runs the query at the same time each day. + "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. 
An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. + "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + }, + "periodicity": 42, # Required. LINT.IfChange The number of days between runs. Must be greater than or equal to 1 day and less than or equal to 31 days. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "hourly": { # Used to schedule the query to run every so many hours. # Schedule the query to execute every so many hours. + "minuteOffset": 42, # Optional. LINT.IfChange The number of minutes after the hour (in UTC) to run the query. Must be between 0 and 59 inclusive. If left unspecified, then an arbitrary offset is used. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + "periodicity": 42, # Required. LINT.IfChange The number of hours between runs. Must be greater than or equal to 1 hour and less than or equal to 48 hours. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "minutes": { # Used to schedule the query to run every so many minutes. # Schedule the query to execute every so many minutes. + "periodicity": 42, # Required. LINT.IfChange Number of minutes between runs. The interval must be between 5 minutes and 1440 minutes. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "query": "A String", # Required. The Log Analytics SQL query to run, as a string. The query must conform to the required shape. Specifically, the query must not try to filter the input by time. A filter will automatically be applied to filter the input so that the query receives all rows received since the last time the query was run.E.g. 
Extract all log entries containing an HTTP request:SELECT timestamp, log_name, severity, http_request, resource, labels FROM my-project.global._Default._AllLogs WHERE http_request IS NOT NULL + "rowCountTest": { # A test that checks if the number of rows in the result set violates some threshold. # Test the row count against a threshold. + "comparison": "A String", # Required. The comparison to apply between the number of rows returned by the query and the threshold. + "threshold": "A String", # Required. The value against which to compare the row count. + }, + }, "conditionThreshold": { # A condition type that compares a collection of time series against a threshold. # A condition that compares a time series against a threshold. "aggregations": [ # Specifies the alignment of data points in individual time series as well as how to combine the retrieved time series together (such as when aggregating multiple streams on each resource to a single stream for each resource or when aggregating streams across all members of a group of resources). Multiple aggregations are applied in the order specified.This field is similar to the one in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). It is advisable to use the ListTimeSeries method when debugging this field. { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). @@ -653,6 +731,32 @@

Method Details

"query": "A String", # Required. The PromQL expression to evaluate. Every evaluation cycle this expression is evaluated at the current time, and all resultant time series become pending/firing alerts. This field must not be empty. "ruleGroup": "A String", # Optional. The rule group name of this alert in the corresponding Prometheus configuration file.Some external tools may require this field to be populated correctly in order to refer to the original Prometheus configuration file. The rule group name and the alert name are necessary to update the relevant AlertPolicies in case the definition of the rule group changes in the future.This field is optional. If this field is not empty, then it must contain a valid UTF-8 string. This field may not exceed 2048 Unicode characters in length. }, + "conditionSql": { # A condition that allows alert policies to be defined using GoogleSQL. SQL conditions examine a sliding window of logs using GoogleSQL. Alert policies with SQL conditions may incur additional billing. # A condition that uses SQL to define alerts in Logs Analytics. + "booleanTest": { # A test that uses an alerting result in a boolean column produced by the SQL query. # Test the boolean value in the indicated column. + "column": "A String", # Required. The name of the column containing the boolean value. If the value in a row is NULL, that row is ignored. + }, + "daily": { # Used to schedule the query to run every so many days. # Schedule the query to execute every so many days. + "executionTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and google.protobuf.Timestamp. # Optional. The time of day (in UTC) at which the query should run. If left unspecified, the server picks an arbitrary time of day and runs the query at the same time each day. + "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. 
An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. + "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + }, + "periodicity": 42, # Required. LINT.IfChange The number of days between runs. Must be greater than or equal to 1 day and less than or equal to 31 days. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "hourly": { # Used to schedule the query to run every so many hours. # Schedule the query to execute every so many hours. + "minuteOffset": 42, # Optional. LINT.IfChange The number of minutes after the hour (in UTC) to run the query. Must be between 0 and 59 inclusive. If left unspecified, then an arbitrary offset is used. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + "periodicity": 42, # Required. LINT.IfChange The number of hours between runs. Must be greater than or equal to 1 hour and less than or equal to 48 hours. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "minutes": { # Used to schedule the query to run every so many minutes. # Schedule the query to execute every so many minutes. + "periodicity": 42, # Required. LINT.IfChange Number of minutes between runs. The interval must be between 5 minutes and 1440 minutes. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "query": "A String", # Required. The Log Analytics SQL query to run, as a string. The query must conform to the required shape. Specifically, the query must not try to filter the input by time. A filter will automatically be applied to filter the input so that the query receives all rows received since the last time the query was run.E.g. 
Extract all log entries containing an HTTP request:SELECT timestamp, log_name, severity, http_request, resource, labels FROM my-project.global._Default._AllLogs WHERE http_request IS NOT NULL + "rowCountTest": { # A test that checks if the number of rows in the result set violates some threshold. # Test the row count against a threshold. + "comparison": "A String", # Required. The comparison to apply between the number of rows returned by the query and the threshold. + "threshold": "A String", # Required. The value against which to compare the row count. + }, + }, "conditionThreshold": { # A condition type that compares a collection of time series against a threshold. # A condition that compares a time series against a threshold. "aggregations": [ # Specifies the alignment of data points in individual time series as well as how to combine the retrieved time series together (such as when aggregating multiple streams on each resource to a single stream for each resource or when aggregating streams across all members of a group of resources). Multiple aggregations are applied in the order specified.This field is similar to the one in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). It is advisable to use the ListTimeSeries method when debugging this field. { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). @@ -821,6 +925,32 @@

Method Details

"query": "A String", # Required. The PromQL expression to evaluate. Every evaluation cycle this expression is evaluated at the current time, and all resultant time series become pending/firing alerts. This field must not be empty. "ruleGroup": "A String", # Optional. The rule group name of this alert in the corresponding Prometheus configuration file.Some external tools may require this field to be populated correctly in order to refer to the original Prometheus configuration file. The rule group name and the alert name are necessary to update the relevant AlertPolicies in case the definition of the rule group changes in the future.This field is optional. If this field is not empty, then it must contain a valid UTF-8 string. This field may not exceed 2048 Unicode characters in length. }, + "conditionSql": { # A condition that allows alert policies to be defined using GoogleSQL. SQL conditions examine a sliding window of logs using GoogleSQL. Alert policies with SQL conditions may incur additional billing. # A condition that uses SQL to define alerts in Logs Analytics. + "booleanTest": { # A test that uses an alerting result in a boolean column produced by the SQL query. # Test the boolean value in the indicated column. + "column": "A String", # Required. The name of the column containing the boolean value. If the value in a row is NULL, that row is ignored. + }, + "daily": { # Used to schedule the query to run every so many days. # Schedule the query to execute every so many days. + "executionTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and google.protobuf.Timestamp. # Optional. The time of day (in UTC) at which the query should run. If left unspecified, the server picks an arbitrary time of day and runs the query at the same time each day. + "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. 
An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. + "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + }, + "periodicity": 42, # Required. LINT.IfChange The number of days between runs. Must be greater than or equal to 1 day and less than or equal to 31 days. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "hourly": { # Used to schedule the query to run every so many hours. # Schedule the query to execute every so many hours. + "minuteOffset": 42, # Optional. LINT.IfChange The number of minutes after the hour (in UTC) to run the query. Must be between 0 and 59 inclusive. If left unspecified, then an arbitrary offset is used. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + "periodicity": 42, # Required. LINT.IfChange The number of hours between runs. Must be greater than or equal to 1 hour and less than or equal to 48 hours. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "minutes": { # Used to schedule the query to run every so many minutes. # Schedule the query to execute every so many minutes. + "periodicity": 42, # Required. LINT.IfChange Number of minutes between runs. The interval must be between 5 minutes and 1440 minutes. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "query": "A String", # Required. The Log Analytics SQL query to run, as a string. The query must conform to the required shape. Specifically, the query must not try to filter the input by time. A filter will automatically be applied to filter the input so that the query receives all rows received since the last time the query was run.E.g. 
Extract all log entries containing an HTTP request:SELECT timestamp, log_name, severity, http_request, resource, labels FROM my-project.global._Default._AllLogs WHERE http_request IS NOT NULL + "rowCountTest": { # A test that checks if the number of rows in the result set violates some threshold. # Test the row count against a threshold. + "comparison": "A String", # Required. The comparison to apply between the number of rows returned by the query and the threshold. + "threshold": "A String", # Required. The value against which to compare the row count. + }, + }, "conditionThreshold": { # A condition type that compares a collection of time series against a threshold. # A condition that compares a time series against a threshold. "aggregations": [ # Specifies the alignment of data points in individual time series as well as how to combine the retrieved time series together (such as when aggregating multiple streams on each resource to a single stream for each resource or when aggregating streams across all members of a group of resources). Multiple aggregations are applied in the order specified.This field is similar to the one in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). It is advisable to use the ListTimeSeries method when debugging this field. { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). @@ -970,6 +1100,32 @@

Method Details

"query": "A String", # Required. The PromQL expression to evaluate. Every evaluation cycle this expression is evaluated at the current time, and all resultant time series become pending/firing alerts. This field must not be empty. "ruleGroup": "A String", # Optional. The rule group name of this alert in the corresponding Prometheus configuration file.Some external tools may require this field to be populated correctly in order to refer to the original Prometheus configuration file. The rule group name and the alert name are necessary to update the relevant AlertPolicies in case the definition of the rule group changes in the future.This field is optional. If this field is not empty, then it must contain a valid UTF-8 string. This field may not exceed 2048 Unicode characters in length. }, + "conditionSql": { # A condition that allows alert policies to be defined using GoogleSQL. SQL conditions examine a sliding window of logs using GoogleSQL. Alert policies with SQL conditions may incur additional billing. # A condition that uses SQL to define alerts in Logs Analytics. + "booleanTest": { # A test that uses an alerting result in a boolean column produced by the SQL query. # Test the boolean value in the indicated column. + "column": "A String", # Required. The name of the column containing the boolean value. If the value in a row is NULL, that row is ignored. + }, + "daily": { # Used to schedule the query to run every so many days. # Schedule the query to execute every so many days. + "executionTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and google.protobuf.Timestamp. # Optional. The time of day (in UTC) at which the query should run. If left unspecified, the server picks an arbitrary time of day and runs the query at the same time each day. + "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. 
An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. + "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + }, + "periodicity": 42, # Required. LINT.IfChange The number of days between runs. Must be greater than or equal to 1 day and less than or equal to 31 days. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "hourly": { # Used to schedule the query to run every so many hours. # Schedule the query to execute every so many hours. + "minuteOffset": 42, # Optional. LINT.IfChange The number of minutes after the hour (in UTC) to run the query. Must be between 0 and 59 inclusive. If left unspecified, then an arbitrary offset is used. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + "periodicity": 42, # Required. LINT.IfChange The number of hours between runs. Must be greater than or equal to 1 hour and less than or equal to 48 hours. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "minutes": { # Used to schedule the query to run every so many minutes. # Schedule the query to execute every so many minutes. + "periodicity": 42, # Required. LINT.IfChange Number of minutes between runs. The interval must be between 5 minutes and 1440 minutes. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc) + }, + "query": "A String", # Required. The Log Analytics SQL query to run, as a string. The query must conform to the required shape. Specifically, the query must not try to filter the input by time. A filter will automatically be applied to filter the input so that the query receives all rows received since the last time the query was run.E.g. 
Extract all log entries containing an HTTP request:SELECT timestamp, log_name, severity, http_request, resource, labels FROM my-project.global._Default._AllLogs WHERE http_request IS NOT NULL + "rowCountTest": { # A test that checks if the number of rows in the result set violates some threshold. # Test the row count against a threshold. + "comparison": "A String", # Required. The comparison to apply between the number of rows returned by the query and the threshold. + "threshold": "A String", # Required. The value against which to compare the row count. + }, + }, "conditionThreshold": { # A condition type that compares a collection of time series against a threshold. # A condition that compares a time series against a threshold. "aggregations": [ # Specifies the alignment of data points in individual time series as well as how to combine the retrieved time series together (such as when aggregating multiple streams on each resource to a single stream for each resource or when aggregating streams across all members of a group of resources). Multiple aggregations are applied in the order specified.This field is similar to the one in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). It is advisable to use the ListTimeSeries method when debugging this field. { # Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). diff --git a/docs/dyn/mybusinessbusinessinformation_v1.accounts.locations.html b/docs/dyn/mybusinessbusinessinformation_v1.accounts.locations.html index e19bc3f47ab..65ac5aae6b9 100644 --- a/docs/dyn/mybusinessbusinessinformation_v1.accounts.locations.html +++ b/docs/dyn/mybusinessbusinessinformation_v1.accounts.locations.html @@ -174,17 +174,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -214,17 +214,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -279,10 +279,10 @@

Method Details

"specialHourPeriods": [ # Required. A list of exceptions to the business's regular hours. { # Represents a single time period when a location's operational hours differ from its normal business hours. A special hour period must represent a range of less than 24 hours. The `open_time` and `start_date` must predate the `close_time` and `end_date`. The `close_time` and `end_date` can extend to 11:59 a.m. on the day after the specified `start_date`. For example, the following inputs are valid: start_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=11:59 The following inputs are not valid: start_date=2015-11-23, open_time=13:00, close_time=11:59 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=12:00 start_date=2015-11-23, end_date=2015-11-25, open_time=08:00, close_time=18:00 "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. 
An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "closed": True or False, # Optional. If true, `end_date`, `open_time`, and `close_time` are ignored, and the date specified in `start_date` is treated as the location being closed for the entire day. "endDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Optional. The calendar date this special hour period ends on. If `end_date` field is not set, default to the date specified in `start_date`. If set, this field must be equal to or at most 1 day after `start_date`. @@ -291,10 +291,10 @@

Method Details

"year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. }, "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00 where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). 
* A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Required. The calendar date this special hour period starts on. "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. @@ -409,17 +409,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -449,17 +449,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -514,10 +514,10 @@

Method Details

"specialHourPeriods": [ # Required. A list of exceptions to the business's regular hours. { # Represents a single time period when a location's operational hours differ from its normal business hours. A special hour period must represent a range of less than 24 hours. The `open_time` and `start_date` must predate the `close_time` and `end_date`. The `close_time` and `end_date` can extend to 11:59 a.m. on the day after the specified `start_date`. For example, the following inputs are valid: start_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=11:59 The following inputs are not valid: start_date=2015-11-23, open_time=13:00, close_time=11:59 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=12:00 start_date=2015-11-23, end_date=2015-11-25, open_time=08:00, close_time=18:00 "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. 
An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "closed": True or False, # Optional. If true, `end_date`, `open_time`, and `close_time` are ignored, and the date specified in `start_date` is treated as the location being closed for the entire day. "endDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Optional. The calendar date this special hour period ends on. If `end_date` field is not set, default to the date specified in `start_date`. If set, this field must be equal to or at most 1 day after `start_date`. @@ -526,10 +526,10 @@

Method Details

"year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. }, "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00 where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). 
* A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Required. The calendar date this special hour period starts on. "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. @@ -656,17 +656,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -696,17 +696,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -761,10 +761,10 @@

Method Details

"specialHourPeriods": [ # Required. A list of exceptions to the business's regular hours. { # Represents a single time period when a location's operational hours differ from its normal business hours. A special hour period must represent a range of less than 24 hours. The `open_time` and `start_date` must predate the `close_time` and `end_date`. The `close_time` and `end_date` can extend to 11:59 a.m. on the day after the specified `start_date`. For example, the following inputs are valid: start_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=11:59 The following inputs are not valid: start_date=2015-11-23, open_time=13:00, close_time=11:59 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=12:00 start_date=2015-11-23, end_date=2015-11-25, open_time=08:00, close_time=18:00 "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. 
An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "closed": True or False, # Optional. If true, `end_date`, `open_time`, and `close_time` are ignored, and the date specified in `start_date` is treated as the location being closed for the entire day. "endDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Optional. The calendar date this special hour period ends on. If `end_date` field is not set, default to the date specified in `start_date`. If set, this field must be equal to or at most 1 day after `start_date`. @@ -773,10 +773,10 @@

Method Details

"year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. }, "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00 where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). 
* A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Required. The calendar date this special hour period starts on. "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. diff --git a/docs/dyn/mybusinessbusinessinformation_v1.googleLocations.html b/docs/dyn/mybusinessbusinessinformation_v1.googleLocations.html index 6866ff35389..0ef409dbd96 100644 --- a/docs/dyn/mybusinessbusinessinformation_v1.googleLocations.html +++ b/docs/dyn/mybusinessbusinessinformation_v1.googleLocations.html @@ -168,17 +168,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -208,17 +208,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -273,10 +273,10 @@

Method Details

"specialHourPeriods": [ # Required. A list of exceptions to the business's regular hours. { # Represents a single time period when a location's operational hours differ from its normal business hours. A special hour period must represent a range of less than 24 hours. The `open_time` and `start_date` must predate the `close_time` and `end_date`. The `close_time` and `end_date` can extend to 11:59 a.m. on the day after the specified `start_date`. For example, the following inputs are valid: start_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=11:59 The following inputs are not valid: start_date=2015-11-23, open_time=13:00, close_time=11:59 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=12:00 start_date=2015-11-23, end_date=2015-11-25, open_time=08:00, close_time=18:00 "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. 
An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "closed": True or False, # Optional. If true, `end_date`, `open_time`, and `close_time` are ignored, and the date specified in `start_date` is treated as the location being closed for the entire day. "endDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Optional. The calendar date this special hour period ends on. If `end_date` field is not set, default to the date specified in `start_date`. If set, this field must be equal to or at most 1 day after `start_date`. @@ -285,10 +285,10 @@

Method Details

"year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. }, "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00 where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). 
* A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Required. The calendar date this special hour period starts on. "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. @@ -407,17 +407,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -447,17 +447,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -512,10 +512,10 @@

Method Details

"specialHourPeriods": [ # Required. A list of exceptions to the business's regular hours. { # Represents a single time period when a location's operational hours differ from its normal business hours. A special hour period must represent a range of less than 24 hours. The `open_time` and `start_date` must predate the `close_time` and `end_date`. The `close_time` and `end_date` can extend to 11:59 a.m. on the day after the specified `start_date`. For example, the following inputs are valid: start_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=11:59 The following inputs are not valid: start_date=2015-11-23, open_time=13:00, close_time=11:59 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=12:00 start_date=2015-11-23, end_date=2015-11-25, open_time=08:00, close_time=18:00 "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. 
An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "closed": True or False, # Optional. If true, `end_date`, `open_time`, and `close_time` are ignored, and the date specified in `start_date` is treated as the location being closed for the entire day. "endDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Optional. The calendar date this special hour period ends on. If `end_date` field is not set, default to the date specified in `start_date`. If set, this field must be equal to or at most 1 day after `start_date`. @@ -524,10 +524,10 @@

Method Details

"year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. }, "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00 where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). 
* A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Required. The calendar date this special hour period starts on. "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. diff --git a/docs/dyn/mybusinessbusinessinformation_v1.locations.html b/docs/dyn/mybusinessbusinessinformation_v1.locations.html index 8ac81bc1f72..21663a20149 100644 --- a/docs/dyn/mybusinessbusinessinformation_v1.locations.html +++ b/docs/dyn/mybusinessbusinessinformation_v1.locations.html @@ -212,17 +212,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -252,17 +252,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -317,10 +317,10 @@

Method Details

"specialHourPeriods": [ # Required. A list of exceptions to the business's regular hours. { # Represents a single time period when a location's operational hours differ from its normal business hours. A special hour period must represent a range of less than 24 hours. The `open_time` and `start_date` must predate the `close_time` and `end_date`. The `close_time` and `end_date` can extend to 11:59 a.m. on the day after the specified `start_date`. For example, the following inputs are valid: start_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=11:59 The following inputs are not valid: start_date=2015-11-23, open_time=13:00, close_time=11:59 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=12:00 start_date=2015-11-23, end_date=2015-11-25, open_time=08:00, close_time=18:00 "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. 
An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "closed": True or False, # Optional. If true, `end_date`, `open_time`, and `close_time` are ignored, and the date specified in `start_date` is treated as the location being closed for the entire day. "endDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Optional. The calendar date this special hour period ends on. If `end_date` field is not set, default to the date specified in `start_date`. If set, this field must be equal to or at most 1 day after `start_date`. @@ -329,10 +329,10 @@

Method Details

"year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. }, "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00 where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). 
* A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Required. The calendar date this special hour period starts on. "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. @@ -496,17 +496,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -536,17 +536,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -601,10 +601,10 @@

Method Details

"specialHourPeriods": [ # Required. A list of exceptions to the business's regular hours. { # Represents a single time period when a location's operational hours differ from its normal business hours. A special hour period must represent a range of less than 24 hours. The `open_time` and `start_date` must predate the `close_time` and `end_date`. The `close_time` and `end_date` can extend to 11:59 a.m. on the day after the specified `start_date`. For example, the following inputs are valid: start_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=11:59 The following inputs are not valid: start_date=2015-11-23, open_time=13:00, close_time=11:59 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=12:00 start_date=2015-11-23, end_date=2015-11-25, open_time=08:00, close_time=18:00 "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. 
An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "closed": True or False, # Optional. If true, `end_date`, `open_time`, and `close_time` are ignored, and the date specified in `start_date` is treated as the location being closed for the entire day. "endDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Optional. The calendar date this special hour period ends on. If `end_date` field is not set, default to the date specified in `start_date`. If set, this field must be equal to or at most 1 day after `start_date`. @@ -613,10 +613,10 @@

Method Details

"year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. }, "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00 where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). 
* A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Required. The calendar date this special hour period starts on. "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. @@ -733,17 +733,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -773,17 +773,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -838,10 +838,10 @@

Method Details

"specialHourPeriods": [ # Required. A list of exceptions to the business's regular hours. { # Represents a single time period when a location's operational hours differ from its normal business hours. A special hour period must represent a range of less than 24 hours. The `open_time` and `start_date` must predate the `close_time` and `end_date`. The `close_time` and `end_date` can extend to 11:59 a.m. on the day after the specified `start_date`. For example, the following inputs are valid: start_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=11:59 The following inputs are not valid: start_date=2015-11-23, open_time=13:00, close_time=11:59 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=12:00 start_date=2015-11-23, end_date=2015-11-25, open_time=08:00, close_time=18:00 "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. 
An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "closed": True or False, # Optional. If true, `end_date`, `open_time`, and `close_time` are ignored, and the date specified in `start_date` is treated as the location being closed for the entire day. "endDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Optional. The calendar date this special hour period ends on. If `end_date` field is not set, default to the date specified in `start_date`. If set, this field must be equal to or at most 1 day after `start_date`. @@ -850,10 +850,10 @@

Method Details

"year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. }, "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00 where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). 
* A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Required. The calendar date this special hour period starts on. "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. @@ -968,17 +968,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1008,17 +1008,17 @@

Method Details

{ # Represents a span of time that the business is open, starting on the specified open day/time and closing on the specified close day/time. The closing time must occur after the opening time, for example later in the same day, or on a subsequent day. "closeDay": "A String", # Required. Indicates the day of the week this period ends on. "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "openDay": "A String", # Required. Indicates the day of the week this period starts on. "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. 
An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, }, ], @@ -1073,10 +1073,10 @@

Method Details

"specialHourPeriods": [ # Required. A list of exceptions to the business's regular hours. { # Represents a single time period when a location's operational hours differ from its normal business hours. A special hour period must represent a range of less than 24 hours. The `open_time` and `start_date` must predate the `close_time` and `end_date`. The `close_time` and `end_date` can extend to 11:59 a.m. on the day after the specified `start_date`. For example, the following inputs are valid: start_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-23, open_time=08:00, close_time=18:00 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=11:59 The following inputs are not valid: start_date=2015-11-23, open_time=13:00, close_time=11:59 start_date=2015-11-23, end_date=2015-11-24, open_time=13:00, close_time=12:00 start_date=2015-11-23, end_date=2015-11-25, open_time=08:00, close_time=18:00 "closeTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00, where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. 
An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "closed": True or False, # Optional. If true, `end_date`, `open_time`, and `close_time` are ignored, and the date specified in `start_date` is treated as the location being closed for the entire day. "endDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Optional. The calendar date this special hour period ends on. If `end_date` field is not set, default to the date specified in `start_date`. If set, this field must be equal to or at most 1 day after `start_date`. @@ -1085,10 +1085,10 @@

Method Details

"year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year. }, "openTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Optional. Valid values are 00:00-24:00 where 24:00 represents midnight at the end of the specified day field. Must be specified if `closed` is false. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "startDate": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). 
* A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Required. The calendar date this special hour period starts on. "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant. diff --git a/docs/dyn/mybusinesslodging_v1.locations.html b/docs/dyn/mybusinesslodging_v1.locations.html index 96d36ae0bb1..6a0756dbd40 100644 --- a/docs/dyn/mybusinesslodging_v1.locations.html +++ b/docs/dyn/mybusinesslodging_v1.locations.html @@ -824,17 +824,17 @@

Method Details

"allInclusiveOnly": True or False, # All inclusive only. The only rate option offered by the hotel is a rate that includes the cost of the room, meals, activities and other amenities that might otherwise be charged separately. "allInclusiveOnlyException": "A String", # All inclusive only exception. "checkinTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Check-in time. The time of the day at which the hotel begins providing guests access to their unit at the beginning of their stay. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "checkinTimeException": "A String", # Check-in time exception. "checkoutTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Check-out time. The time of the day on the last day of a guest's reserved stay at which the guest must vacate their room and settle their bill. Some hotels may offer late or early check out for a fee. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "checkoutTimeException": "A String", # Check-out time exception. "kidsStayFree": True or False, # Kids stay free. The children of guests are allowed to stay in the room/suite of a parent or adult without an additional fee. The policy may or may not stipulate a limit of the child's age or the overall number of children allowed. @@ -1973,17 +1973,17 @@

Method Details

"allInclusiveOnly": True or False, # All inclusive only. The only rate option offered by the hotel is a rate that includes the cost of the room, meals, activities and other amenities that might otherwise be charged separately. "allInclusiveOnlyException": "A String", # All inclusive only exception. "checkinTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Check-in time. The time of the day at which the hotel begins providing guests access to their unit at the beginning of their stay. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "checkinTimeException": "A String", # Check-in time exception. "checkoutTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Check-out time. The time of the day on the last day of a guest's reserved stay at which the guest must vacate their room and settle their bill. Some hotels may offer late or early check out for a fee. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "checkoutTimeException": "A String", # Check-out time exception. "kidsStayFree": True or False, # Kids stay free. The children of guests are allowed to stay in the room/suite of a parent or adult without an additional fee. The policy may or may not stipulate a limit of the child's age or the overall number of children allowed. @@ -3121,17 +3121,17 @@

Method Details

"allInclusiveOnly": True or False, # All inclusive only. The only rate option offered by the hotel is a rate that includes the cost of the room, meals, activities and other amenities that might otherwise be charged separately. "allInclusiveOnlyException": "A String", # All inclusive only exception. "checkinTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Check-in time. The time of the day at which the hotel begins providing guests access to their unit at the beginning of their stay. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "checkinTimeException": "A String", # Check-in time exception. "checkoutTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Check-out time. The time of the day on the last day of a guest's reserved stay at which the guest must vacate their room and settle their bill. Some hotels may offer late or early check out for a fee. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "checkoutTimeException": "A String", # Check-out time exception. "kidsStayFree": True or False, # Kids stay free. The children of guests are allowed to stay in the room/suite of a parent or adult without an additional fee. The policy may or may not stipulate a limit of the child's age or the overall number of children allowed. diff --git a/docs/dyn/mybusinesslodging_v1.locations.lodging.html b/docs/dyn/mybusinesslodging_v1.locations.lodging.html index 938c726731e..f086d469cc6 100644 --- a/docs/dyn/mybusinesslodging_v1.locations.lodging.html +++ b/docs/dyn/mybusinesslodging_v1.locations.lodging.html @@ -818,17 +818,17 @@

Method Details

"allInclusiveOnly": True or False, # All inclusive only. The only rate option offered by the hotel is a rate that includes the cost of the room, meals, activities and other amenities that might otherwise be charged separately. "allInclusiveOnlyException": "A String", # All inclusive only exception. "checkinTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Check-in time. The time of the day at which the hotel begins providing guests access to their unit at the beginning of their stay. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "checkinTimeException": "A String", # Check-in time exception. "checkoutTime": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. 
Related types are google.type.Date and `google.protobuf.Timestamp`. # Check-out time. The time of the day on the last day of a guest's reserved stay at which the guest must vacate their room and settle their bill. Some hotels may offer late or early check out for a fee. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "checkoutTimeException": "A String", # Check-out time exception. "kidsStayFree": True or False, # Kids stay free. The children of guests are allowed to stay in the room/suite of a parent or adult without an additional fee. The policy may or may not stipulate a limit of the child's age or the overall number of children allowed. diff --git a/docs/dyn/osconfig_v1.projects.patchDeployments.html b/docs/dyn/osconfig_v1.projects.patchDeployments.html index 9fc25e4568d..5be7ba3c028 100644 --- a/docs/dyn/osconfig_v1.projects.patchDeployments.html +++ b/docs/dyn/osconfig_v1.projects.patchDeployments.html @@ -263,10 +263,10 @@

Method Details

"nextExecuteTime": "A String", # Output only. The time the next patch job is scheduled to run. "startTime": "A String", # Optional. The time that the recurring schedule becomes effective. Defaults to `create_time` of the patch deployment. "timeOfDay": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Time of the day to run a recurring deployment. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "timeZone": { # Represents a time zone from the [IANA Time Zone Database](https://www.iana.org/time-zones). # Required. Defines the time zone that `time_of_day` is relative to. The rules for daylight saving time are determined by the chosen time zone. "id": "A String", # IANA Time Zone Database time zone, e.g. "America/New_York". @@ -443,10 +443,10 @@

Method Details

"nextExecuteTime": "A String", # Output only. The time the next patch job is scheduled to run. "startTime": "A String", # Optional. The time that the recurring schedule becomes effective. Defaults to `create_time` of the patch deployment. "timeOfDay": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Time of the day to run a recurring deployment. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "timeZone": { # Represents a time zone from the [IANA Time Zone Database](https://www.iana.org/time-zones). # Required. Defines the time zone that `time_of_day` is relative to. The rules for daylight saving time are determined by the chosen time zone. "id": "A String", # IANA Time Zone Database time zone, e.g. "America/New_York". @@ -647,10 +647,10 @@

Method Details

"nextExecuteTime": "A String", # Output only. The time the next patch job is scheduled to run. "startTime": "A String", # Optional. The time that the recurring schedule becomes effective. Defaults to `create_time` of the patch deployment. "timeOfDay": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Time of the day to run a recurring deployment. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "timeZone": { # Represents a time zone from the [IANA Time Zone Database](https://www.iana.org/time-zones). # Required. Defines the time zone that `time_of_day` is relative to. The rules for daylight saving time are determined by the chosen time zone. "id": "A String", # IANA Time Zone Database time zone, e.g. "America/New_York". @@ -838,10 +838,10 @@

Method Details

"nextExecuteTime": "A String", # Output only. The time the next patch job is scheduled to run. "startTime": "A String", # Optional. The time that the recurring schedule becomes effective. Defaults to `create_time` of the patch deployment. "timeOfDay": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Time of the day to run a recurring deployment. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "timeZone": { # Represents a time zone from the [IANA Time Zone Database](https://www.iana.org/time-zones). # Required. Defines the time zone that `time_of_day` is relative to. The rules for daylight saving time are determined by the chosen time zone. "id": "A String", # IANA Time Zone Database time zone, e.g. "America/New_York". @@ -1035,10 +1035,10 @@

Method Details

"nextExecuteTime": "A String", # Output only. The time the next patch job is scheduled to run. "startTime": "A String", # Optional. The time that the recurring schedule becomes effective. Defaults to `create_time` of the patch deployment. "timeOfDay": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Time of the day to run a recurring deployment. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "timeZone": { # Represents a time zone from the [IANA Time Zone Database](https://www.iana.org/time-zones). # Required. Defines the time zone that `time_of_day` is relative to. The rules for daylight saving time are determined by the chosen time zone. "id": "A String", # IANA Time Zone Database time zone, e.g. "America/New_York". @@ -1215,10 +1215,10 @@

Method Details

"nextExecuteTime": "A String", # Output only. The time the next patch job is scheduled to run. "startTime": "A String", # Optional. The time that the recurring schedule becomes effective. Defaults to `create_time` of the patch deployment. "timeOfDay": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Time of the day to run a recurring deployment. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "timeZone": { # Represents a time zone from the [IANA Time Zone Database](https://www.iana.org/time-zones). # Required. Defines the time zone that `time_of_day` is relative to. The rules for daylight saving time are determined by the chosen time zone. "id": "A String", # IANA Time Zone Database time zone, e.g. "America/New_York". @@ -1407,10 +1407,10 @@

Method Details

"nextExecuteTime": "A String", # Output only. The time the next patch job is scheduled to run. "startTime": "A String", # Optional. The time that the recurring schedule becomes effective. Defaults to `create_time` of the patch deployment. "timeOfDay": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Time of the day to run a recurring deployment. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "timeZone": { # Represents a time zone from the [IANA Time Zone Database](https://www.iana.org/time-zones). # Required. Defines the time zone that `time_of_day` is relative to. The rules for daylight saving time are determined by the chosen time zone. "id": "A String", # IANA Time Zone Database time zone, e.g. "America/New_York". @@ -1599,10 +1599,10 @@

Method Details

"nextExecuteTime": "A String", # Output only. The time the next patch job is scheduled to run. "startTime": "A String", # Optional. The time that the recurring schedule becomes effective. Defaults to `create_time` of the patch deployment. "timeOfDay": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Time of the day to run a recurring deployment. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "timeZone": { # Represents a time zone from the [IANA Time Zone Database](https://www.iana.org/time-zones). # Required. Defines the time zone that `time_of_day` is relative to. The rules for daylight saving time are determined by the chosen time zone. "id": "A String", # IANA Time Zone Database time zone, e.g. "America/New_York". 
diff --git a/docs/dyn/osconfig_v1beta.projects.patchDeployments.html b/docs/dyn/osconfig_v1beta.projects.patchDeployments.html index 30ab9b57171..4b547c23b20 100644 --- a/docs/dyn/osconfig_v1beta.projects.patchDeployments.html +++ b/docs/dyn/osconfig_v1beta.projects.patchDeployments.html @@ -263,10 +263,10 @@

Method Details

"nextExecuteTime": "A String", # Output only. The time the next patch job is scheduled to run. "startTime": "A String", # Optional. The time that the recurring schedule becomes effective. Defaults to `create_time` of the patch deployment. "timeOfDay": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Time of the day to run a recurring deployment. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "timeZone": { # Represents a time zone from the [IANA Time Zone Database](https://www.iana.org/time-zones). # Required. Defines the time zone that `time_of_day` is relative to. The rules for daylight saving time are determined by the chosen time zone. "id": "A String", # IANA Time Zone Database time zone, e.g. "America/New_York". @@ -443,10 +443,10 @@

Method Details

"nextExecuteTime": "A String", # Output only. The time the next patch job is scheduled to run. "startTime": "A String", # Optional. The time that the recurring schedule becomes effective. Defaults to `create_time` of the patch deployment. "timeOfDay": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Time of the day to run a recurring deployment. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "timeZone": { # Represents a time zone from the [IANA Time Zone Database](https://www.iana.org/time-zones). # Required. Defines the time zone that `time_of_day` is relative to. The rules for daylight saving time are determined by the chosen time zone. "id": "A String", # IANA Time Zone Database time zone, e.g. "America/New_York". @@ -647,10 +647,10 @@

Method Details

"nextExecuteTime": "A String", # Output only. The time the next patch job is scheduled to run. "startTime": "A String", # Optional. The time that the recurring schedule becomes effective. Defaults to `create_time` of the patch deployment. "timeOfDay": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Time of the day to run a recurring deployment. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "timeZone": { # Represents a time zone from the [IANA Time Zone Database](https://www.iana.org/time-zones). # Required. Defines the time zone that `time_of_day` is relative to. The rules for daylight saving time are determined by the chosen time zone. "id": "A String", # IANA Time Zone Database time zone, e.g. "America/New_York". @@ -838,10 +838,10 @@

Method Details

"nextExecuteTime": "A String", # Output only. The time the next patch job is scheduled to run. "startTime": "A String", # Optional. The time that the recurring schedule becomes effective. Defaults to `create_time` of the patch deployment. "timeOfDay": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Time of the day to run a recurring deployment. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "timeZone": { # Represents a time zone from the [IANA Time Zone Database](https://www.iana.org/time-zones). # Required. Defines the time zone that `time_of_day` is relative to. The rules for daylight saving time are determined by the chosen time zone. "id": "A String", # IANA Time Zone Database time zone, e.g. "America/New_York". @@ -1035,10 +1035,10 @@

Method Details

"nextExecuteTime": "A String", # Output only. The time the next patch job is scheduled to run. "startTime": "A String", # Optional. The time that the recurring schedule becomes effective. Defaults to `create_time` of the patch deployment. "timeOfDay": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Time of the day to run a recurring deployment. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "timeZone": { # Represents a time zone from the [IANA Time Zone Database](https://www.iana.org/time-zones). # Required. Defines the time zone that `time_of_day` is relative to. The rules for daylight saving time are determined by the chosen time zone. "id": "A String", # IANA Time Zone Database time zone, e.g. "America/New_York". @@ -1215,10 +1215,10 @@

Method Details

"nextExecuteTime": "A String", # Output only. The time the next patch job is scheduled to run. "startTime": "A String", # Optional. The time that the recurring schedule becomes effective. Defaults to `create_time` of the patch deployment. "timeOfDay": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Time of the day to run a recurring deployment. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "timeZone": { # Represents a time zone from the [IANA Time Zone Database](https://www.iana.org/time-zones). # Required. Defines the time zone that `time_of_day` is relative to. The rules for daylight saving time are determined by the chosen time zone. "id": "A String", # IANA Time Zone Database time zone, e.g. "America/New_York". @@ -1407,10 +1407,10 @@

Method Details

"nextExecuteTime": "A String", # Output only. The time the next patch job is scheduled to run. "startTime": "A String", # Optional. The time that the recurring schedule becomes effective. Defaults to `create_time` of the patch deployment. "timeOfDay": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Time of the day to run a recurring deployment. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "timeZone": { # Represents a time zone from the [IANA Time Zone Database](https://www.iana.org/time-zones). # Required. Defines the time zone that `time_of_day` is relative to. The rules for daylight saving time are determined by the chosen time zone. "id": "A String", # IANA Time Zone Database time zone, e.g. "America/New_York". @@ -1599,10 +1599,10 @@

Method Details

"nextExecuteTime": "A String", # Output only. The time the next patch job is scheduled to run. "startTime": "A String", # Optional. The time that the recurring schedule becomes effective. Defaults to `create_time` of the patch deployment. "timeOfDay": { # Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and `google.protobuf.Timestamp`. # Required. Time of the day to run a recurring deployment. - "hours": 42, # Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. - "minutes": 42, # Minutes of hour of day. Must be from 0 to 59. - "nanos": 42, # Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. - "seconds": 42, # Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds. + "hours": 42, # Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + "minutes": 42, # Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59. + "nanos": 42, # Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999. + "seconds": 42, # Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds. }, "timeZone": { # Represents a time zone from the [IANA Time Zone Database](https://www.iana.org/time-zones). # Required. Defines the time zone that `time_of_day` is relative to. The rules for daylight saving time are determined by the chosen time zone. "id": "A String", # IANA Time Zone Database time zone, e.g. "America/New_York". 
diff --git a/docs/dyn/realtimebidding_v1.bidders.creatives.html b/docs/dyn/realtimebidding_v1.bidders.creatives.html index d90eeae915a..63742a28309 100644 --- a/docs/dyn/realtimebidding_v1.bidders.creatives.html +++ b/docs/dyn/realtimebidding_v1.bidders.creatives.html @@ -546,8 +546,8 @@

Method Details

"skipOffset": "A String", # The minimum duration that the user has to watch before being able to skip this ad. If the field is not set, the ad is not skippable. If the field is set, the ad is skippable. Can be used to filter the response of the creatives.list method. "vastVersion": "A String", # The maximum VAST version across all wrapped VAST documents. Can be used to filter the response of the creatives.list method. }, - "videoUrl": "A String", # The URL to fetch a video ad. - "videoVastXml": "A String", # The contents of a VAST document for a video ad. This document should conform to the VAST 2.0 or 3.0 standard. + "videoUrl": "A String", # The URL to fetch a video ad. The URL should return an XML response that conforms to the VAST 2.0, 3.0 or 4.x standard. + "videoVastXml": "A String", # The contents of a VAST document for a video ad. This document should conform to the VAST 2.0, 3.0, or 4.x standard. }, }, ], diff --git a/docs/dyn/realtimebidding_v1.buyers.creatives.html b/docs/dyn/realtimebidding_v1.buyers.creatives.html index 1a849302326..6e61dcd1750 100644 --- a/docs/dyn/realtimebidding_v1.buyers.creatives.html +++ b/docs/dyn/realtimebidding_v1.buyers.creatives.html @@ -537,8 +537,8 @@

Method Details

"skipOffset": "A String", # The minimum duration that the user has to watch before being able to skip this ad. If the field is not set, the ad is not skippable. If the field is set, the ad is skippable. Can be used to filter the response of the creatives.list method. "vastVersion": "A String", # The maximum VAST version across all wrapped VAST documents. Can be used to filter the response of the creatives.list method. }, - "videoUrl": "A String", # The URL to fetch a video ad. - "videoVastXml": "A String", # The contents of a VAST document for a video ad. This document should conform to the VAST 2.0 or 3.0 standard. + "videoUrl": "A String", # The URL to fetch a video ad. The URL should return an XML response that conforms to the VAST 2.0, 3.0 or 4.x standard. + "videoVastXml": "A String", # The contents of a VAST document for a video ad. This document should conform to the VAST 2.0, 3.0, or 4.x standard. }, } @@ -980,8 +980,8 @@

Method Details

"skipOffset": "A String", # The minimum duration that the user has to watch before being able to skip this ad. If the field is not set, the ad is not skippable. If the field is set, the ad is skippable. Can be used to filter the response of the creatives.list method. "vastVersion": "A String", # The maximum VAST version across all wrapped VAST documents. Can be used to filter the response of the creatives.list method. }, - "videoUrl": "A String", # The URL to fetch a video ad. - "videoVastXml": "A String", # The contents of a VAST document for a video ad. This document should conform to the VAST 2.0 or 3.0 standard. + "videoUrl": "A String", # The URL to fetch a video ad. The URL should return an XML response that conforms to the VAST 2.0, 3.0 or 4.x standard. + "videoVastXml": "A String", # The contents of a VAST document for a video ad. This document should conform to the VAST 2.0, 3.0, or 4.x standard. }, }
@@ -1435,8 +1435,8 @@

Method Details

"skipOffset": "A String", # The minimum duration that the user has to watch before being able to skip this ad. If the field is not set, the ad is not skippable. If the field is set, the ad is skippable. Can be used to filter the response of the creatives.list method. "vastVersion": "A String", # The maximum VAST version across all wrapped VAST documents. Can be used to filter the response of the creatives.list method. }, - "videoUrl": "A String", # The URL to fetch a video ad. - "videoVastXml": "A String", # The contents of a VAST document for a video ad. This document should conform to the VAST 2.0 or 3.0 standard. + "videoUrl": "A String", # The URL to fetch a video ad. The URL should return an XML response that conforms to the VAST 2.0, 3.0 or 4.x standard. + "videoVastXml": "A String", # The contents of a VAST document for a video ad. This document should conform to the VAST 2.0, 3.0, or 4.x standard. }, }
@@ -1895,8 +1895,8 @@

Method Details

"skipOffset": "A String", # The minimum duration that the user has to watch before being able to skip this ad. If the field is not set, the ad is not skippable. If the field is set, the ad is skippable. Can be used to filter the response of the creatives.list method. "vastVersion": "A String", # The maximum VAST version across all wrapped VAST documents. Can be used to filter the response of the creatives.list method. }, - "videoUrl": "A String", # The URL to fetch a video ad. - "videoVastXml": "A String", # The contents of a VAST document for a video ad. This document should conform to the VAST 2.0 or 3.0 standard. + "videoUrl": "A String", # The URL to fetch a video ad. The URL should return an XML response that conforms to the VAST 2.0, 3.0 or 4.x standard. + "videoVastXml": "A String", # The contents of a VAST document for a video ad. This document should conform to the VAST 2.0, 3.0, or 4.x standard. }, }, ], @@ -2357,8 +2357,8 @@

Method Details

"skipOffset": "A String", # The minimum duration that the user has to watch before being able to skip this ad. If the field is not set, the ad is not skippable. If the field is set, the ad is skippable. Can be used to filter the response of the creatives.list method. "vastVersion": "A String", # The maximum VAST version across all wrapped VAST documents. Can be used to filter the response of the creatives.list method. }, - "videoUrl": "A String", # The URL to fetch a video ad. - "videoVastXml": "A String", # The contents of a VAST document for a video ad. This document should conform to the VAST 2.0 or 3.0 standard. + "videoUrl": "A String", # The URL to fetch a video ad. The URL should return an XML response that conforms to the VAST 2.0, 3.0 or 4.x standard. + "videoVastXml": "A String", # The contents of a VAST document for a video ad. This document should conform to the VAST 2.0, 3.0, or 4.x standard. }, } @@ -2801,8 +2801,8 @@

Method Details

"skipOffset": "A String", # The minimum duration that the user has to watch before being able to skip this ad. If the field is not set, the ad is not skippable. If the field is set, the ad is skippable. Can be used to filter the response of the creatives.list method. "vastVersion": "A String", # The maximum VAST version across all wrapped VAST documents. Can be used to filter the response of the creatives.list method. }, - "videoUrl": "A String", # The URL to fetch a video ad. - "videoVastXml": "A String", # The contents of a VAST document for a video ad. This document should conform to the VAST 2.0 or 3.0 standard. + "videoUrl": "A String", # The URL to fetch a video ad. The URL should return an XML response that conforms to the VAST 2.0, 3.0 or 4.x standard. + "videoVastXml": "A String", # The contents of a VAST document for a video ad. This document should conform to the VAST 2.0, 3.0, or 4.x standard. }, }
diff --git a/docs/dyn/run_v1.namespaces.configurations.html b/docs/dyn/run_v1.namespaces.configurations.html index d875e7d0ed6..c1964b062fa 100644 --- a/docs/dyn/run_v1.namespaces.configurations.html +++ b/docs/dyn/run_v1.namespaces.configurations.html @@ -107,7 +107,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of resource, in this case always "Configuration". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Configuration, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. 
* `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. 
* `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -141,7 +141,7 @@

Method Details

"spec": { # ConfigurationSpec holds the desired state of the Configuration (from the client). # Spec holds the desired state of the Configuration (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Template holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. 
* `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -461,7 +461,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of resource, in this case always "Configuration". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Configuration, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. 
* `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. 
* `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -495,7 +495,7 @@

Method Details

"spec": { # ConfigurationSpec holds the desired state of the Configuration (from the client). # Spec holds the desired state of the Configuration (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Template holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. 
* `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.namespaces.domainmappings.html b/docs/dyn/run_v1.namespaces.domainmappings.html index fbbb1ae26b2..fd7ff3c5c6c 100644 --- a/docs/dyn/run_v1.namespaces.domainmappings.html +++ b/docs/dyn/run_v1.namespaces.domainmappings.html @@ -108,7 +108,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "domains.cloudrun.com/v1". "kind": "A String", # The kind of resource, in this case "DomainMapping". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this BuildTemplate. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. 
* `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. 
* `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -181,7 +181,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "domains.cloudrun.com/v1". "kind": "A String", # The kind of resource, in this case "DomainMapping". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this BuildTemplate. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. 
* `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. 
* `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -305,7 +305,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "domains.cloudrun.com/v1". "kind": "A String", # The kind of resource, in this case "DomainMapping". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this BuildTemplate. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. 
* `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. 
* `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -394,7 +394,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "domains.cloudrun.com/v1". "kind": "A String", # The kind of resource, in this case "DomainMapping". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this BuildTemplate. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. 
* `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. 
* `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.namespaces.executions.html b/docs/dyn/run_v1.namespaces.executions.html index 41b91563c81..8e06dcec071 100644 --- a/docs/dyn/run_v1.namespaces.executions.html +++ b/docs/dyn/run_v1.namespaces.executions.html @@ -114,7 +114,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. 
* `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. 
* `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -473,7 +473,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. 
* `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. 
* `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -793,7 +793,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. 
* `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. 
* `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.namespaces.jobs.html b/docs/dyn/run_v1.namespaces.jobs.html index bdf25dbaee2..bb9619ee62e 100644 --- a/docs/dyn/run_v1.namespaces.jobs.html +++ b/docs/dyn/run_v1.namespaces.jobs.html @@ -114,7 +114,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. 
* `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. 
* `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -150,7 +150,7 @@

Method Details

"startExecutionToken": "A String", # A unique string used as a suffix for creating a new execution. The Job will become ready when the execution is successfully started. The sum of job name and token length must be fewer than 63 characters. "template": { # ExecutionTemplateSpec describes the metadata and spec an Execution should have when created from a job. # Optional. Describes the execution that will be created when running a job. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Optional metadata for this Execution, including labels and annotations. The following annotation keys set properties of the created execution: * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. 
* `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -455,7 +455,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. 
* `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. 
* `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -491,7 +491,7 @@

Method Details

"startExecutionToken": "A String", # A unique string used as a suffix for creating a new execution. The Job will become ready when the execution is successfully started. The sum of job name and token length must be fewer than 63 characters. "template": { # ExecutionTemplateSpec describes the metadata and spec an Execution should have when created from a job. # Optional. Describes the execution that will be created when running a job. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Optional metadata for this Execution, including labels and annotations. The following annotation keys set properties of the created execution: * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. 
* `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -847,7 +847,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. 
* `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. 
* `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -883,7 +883,7 @@

Method Details

"startExecutionToken": "A String", # A unique string used as a suffix for creating a new execution. The Job will become ready when the execution is successfully started. The sum of job name and token length must be fewer than 63 characters. "template": { # ExecutionTemplateSpec describes the metadata and spec an Execution should have when created from a job. # Optional. Describes the execution that will be created when running a job. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Optional metadata for this Execution, including labels and annotations. The following annotation keys set properties of the created execution: * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. 
* `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1205,7 +1205,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. 
* `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. 
* `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1241,7 +1241,7 @@

Method Details

"startExecutionToken": "A String", # A unique string used as a suffix for creating a new execution. The Job will become ready when the execution is successfully started. The sum of job name and token length must be fewer than 63 characters. "template": { # ExecutionTemplateSpec describes the metadata and spec an Execution should have when created from a job. # Optional. Describes the execution that will be created when running a job. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Optional metadata for this Execution, including labels and annotations. The following annotation keys set properties of the created execution: * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. 
* `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1559,7 +1559,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. 
* `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. 
* `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1595,7 +1595,7 @@

Method Details

"startExecutionToken": "A String", # A unique string used as a suffix for creating a new execution. The Job will become ready when the execution is successfully started. The sum of job name and token length must be fewer than 63 characters. "template": { # ExecutionTemplateSpec describes the metadata and spec an Execution should have when created from a job. # Optional. Describes the execution that will be created when running a job. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Optional metadata for this Execution, including labels and annotations. The following annotation keys set properties of the created execution: * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. 
* `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1900,7 +1900,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. 
* `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. 
* `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1936,7 +1936,7 @@

Method Details

"startExecutionToken": "A String", # A unique string used as a suffix for creating a new execution. The Job will become ready when the execution is successfully started. The sum of job name and token length must be fewer than 63 characters. "template": { # ExecutionTemplateSpec describes the metadata and spec an Execution should have when created from a job. # Optional. Describes the execution that will be created when running a job. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Optional metadata for this Execution, including labels and annotations. The following annotation keys set properties of the created execution: * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. 
* `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -2291,7 +2291,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. 
* `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. 
* `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.namespaces.revisions.html b/docs/dyn/run_v1.namespaces.revisions.html index 36dd49e7724..c0ad6adb171 100644 --- a/docs/dyn/run_v1.namespaces.revisions.html +++ b/docs/dyn/run_v1.namespaces.revisions.html @@ -155,7 +155,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of this resource, in this case "Revision". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Revision, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. 
* `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. 
* `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -475,7 +475,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of this resource, in this case "Revision". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Revision, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. 
* `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. 
* `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.namespaces.routes.html b/docs/dyn/run_v1.namespaces.routes.html index c927e9b6abc..fc22b4f5367 100644 --- a/docs/dyn/run_v1.namespaces.routes.html +++ b/docs/dyn/run_v1.namespaces.routes.html @@ -107,7 +107,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of this resource, in this case always "Route". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Route, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. 
* `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. 
* `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -208,7 +208,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of this resource, in this case always "Route". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Route, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. 
* `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. 
* `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.namespaces.services.html b/docs/dyn/run_v1.namespaces.services.html index cc7257d4f54..493968d95bc 100644 --- a/docs/dyn/run_v1.namespaces.services.html +++ b/docs/dyn/run_v1.namespaces.services.html @@ -111,7 +111,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -145,7 +145,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. 
* `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -473,7 +473,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -507,7 +507,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. 
* `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -886,7 +886,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -920,7 +920,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. 
* `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1264,7 +1264,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1298,7 +1298,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. 
* `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1638,7 +1638,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1672,7 +1672,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. 
* `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -2000,7 +2000,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -2034,7 +2034,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. 
* `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.namespaces.tasks.html b/docs/dyn/run_v1.namespaces.tasks.html index 52ac002c104..45c7eea8218 100644 --- a/docs/dyn/run_v1.namespaces.tasks.html +++ b/docs/dyn/run_v1.namespaces.tasks.html @@ -107,7 +107,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. 
* `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. 
* `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -430,7 +430,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. 
* `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. 
* `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.projects.locations.configurations.html b/docs/dyn/run_v1.projects.locations.configurations.html index ea9ef507411..da933076578 100644 --- a/docs/dyn/run_v1.projects.locations.configurations.html +++ b/docs/dyn/run_v1.projects.locations.configurations.html @@ -107,7 +107,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of resource, in this case always "Configuration". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Configuration, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. 
* `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. 
* `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -141,7 +141,7 @@

Method Details

"spec": { # ConfigurationSpec holds the desired state of the Configuration (from the client). # Spec holds the desired state of the Configuration (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Template holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. 
* `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -461,7 +461,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of resource, in this case always "Configuration". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Configuration, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. 
* `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. 
* `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -495,7 +495,7 @@

Method Details

"spec": { # ConfigurationSpec holds the desired state of the Configuration (from the client). # Spec holds the desired state of the Configuration (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Template holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. 
* `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.projects.locations.domainmappings.html b/docs/dyn/run_v1.projects.locations.domainmappings.html index f1f049be897..78a9f303811 100644 --- a/docs/dyn/run_v1.projects.locations.domainmappings.html +++ b/docs/dyn/run_v1.projects.locations.domainmappings.html @@ -108,7 +108,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "domains.cloudrun.com/v1". "kind": "A String", # The kind of resource, in this case "DomainMapping". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this BuildTemplate. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. 
* `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. 
* `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -181,7 +181,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "domains.cloudrun.com/v1". "kind": "A String", # The kind of resource, in this case "DomainMapping". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this BuildTemplate. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. 
* `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. 
* `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -305,7 +305,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "domains.cloudrun.com/v1". "kind": "A String", # The kind of resource, in this case "DomainMapping". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this BuildTemplate. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. 
* `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. 
* `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -394,7 +394,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "domains.cloudrun.com/v1". "kind": "A String", # The kind of resource, in this case "DomainMapping". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this BuildTemplate. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. 
* `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. 
* `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.projects.locations.revisions.html b/docs/dyn/run_v1.projects.locations.revisions.html index 8cad2ff78ab..a02f9137706 100644 --- a/docs/dyn/run_v1.projects.locations.revisions.html +++ b/docs/dyn/run_v1.projects.locations.revisions.html @@ -155,7 +155,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of this resource, in this case "Revision". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Revision, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. 
* `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. 
* `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -475,7 +475,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of this resource, in this case "Revision". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Revision, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. 
* `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. 
* `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.projects.locations.routes.html b/docs/dyn/run_v1.projects.locations.routes.html index 77bf9d37a74..38c1bc6b4b8 100644 --- a/docs/dyn/run_v1.projects.locations.routes.html +++ b/docs/dyn/run_v1.projects.locations.routes.html @@ -107,7 +107,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of this resource, in this case always "Route". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Route, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. 
* `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. 
* `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -208,7 +208,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of this resource, in this case always "Route". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Route, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. 
* `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. 
* `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.projects.locations.services.html b/docs/dyn/run_v1.projects.locations.services.html index 5b9749def40..9611b1358e0 100644 --- a/docs/dyn/run_v1.projects.locations.services.html +++ b/docs/dyn/run_v1.projects.locations.services.html @@ -120,7 +120,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -154,7 +154,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. 
* `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -482,7 +482,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -516,7 +516,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. 
* `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -895,7 +895,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -929,7 +929,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. 
* `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1321,7 +1321,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1355,7 +1355,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. 
* `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1695,7 +1695,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1729,7 +1729,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. 
* `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -2057,7 +2057,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. 
* `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -2091,7 +2091,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. 
* `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v2.projects.locations.services.html b/docs/dyn/run_v2.projects.locations.services.html index 4e477298ef5..154e7f8df26 100644 --- a/docs/dyn/run_v2.projects.locations.services.html +++ b/docs/dyn/run_v2.projects.locations.services.html @@ -159,6 +159,7 @@

Method Details

"expireTime": "A String", # Output only. For a deleted resource, the time after which it will be permamently deleted. "generation": "A String", # Output only. A number that monotonically increases every time the user modifies the desired state. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a `string` instead of an `integer`. "ingress": "A String", # Optional. Provides the ingress settings for this Service. On output, returns the currently observed ingress settings, or INGRESS_TRAFFIC_UNSPECIFIED if no revision is active. + "invokerIamDisabled": True or False, # Optional. Disables IAM permission check for run.routes.invoke for callers of this service. This setting should not be used with external ingress. "labels": { # Optional. Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 Service. "a_key": "A String", }, @@ -171,7 +172,7 @@

Method Details

"reconciling": True or False, # Output only. Returns true if the Service is currently being acted upon by the system to bring it into the desired state. When a new Service is created, or an existing one is updated, Cloud Run will asynchronously perform all necessary steps to bring the Service to the desired serving state. This process is called reconciliation. While reconciliation is in process, `observed_generation`, `latest_ready_revison`, `traffic_statuses`, and `uri` will have transient values that might mismatch the intended state: Once reconciliation is over (and this field is false), there are two possible outcomes: reconciliation succeeded and the serving state matches the Service, or there was an error, and reconciliation failed. This state can be found in `terminal_condition.state`. If reconciliation succeeded, the following fields will match: `traffic` and `traffic_statuses`, `observed_generation` and `generation`, `latest_ready_revision` and `latest_created_revision`. If reconciliation failed, `traffic_statuses`, `observed_generation`, and `latest_ready_revision` will have the state of the last serving revision, or empty for newly created Services. Additional information on the failure can be found in `terminal_condition` and `conditions`. "satisfiesPzs": True or False, # Output only. Reserved for future use. "scaling": { # Scaling settings applied at the service level rather than at the revision level. # Optional. Specifies service-level scaling settings - "minInstanceCount": 42, # Optional. total min instances for the service. This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving. (BETA) + "minInstanceCount": 42, # Optional. total min instances for the service. This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving. "scalingMode": "A String", # Optional. The scaling mode for the service. 
}, "template": { # RevisionTemplate describes the data a revision should have when created from a template. # Required. The template used to create revisions for this Service. @@ -492,6 +493,7 @@

Method Details

"expireTime": "A String", # Output only. For a deleted resource, the time after which it will be permamently deleted. "generation": "A String", # Output only. A number that monotonically increases every time the user modifies the desired state. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a `string` instead of an `integer`. "ingress": "A String", # Optional. Provides the ingress settings for this Service. On output, returns the currently observed ingress settings, or INGRESS_TRAFFIC_UNSPECIFIED if no revision is active. + "invokerIamDisabled": True or False, # Optional. Disables IAM permission check for run.routes.invoke for callers of this service. This setting should not be used with external ingress. "labels": { # Optional. Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 Service. "a_key": "A String", }, @@ -504,7 +506,7 @@

Method Details

"reconciling": True or False, # Output only. Returns true if the Service is currently being acted upon by the system to bring it into the desired state. When a new Service is created, or an existing one is updated, Cloud Run will asynchronously perform all necessary steps to bring the Service to the desired serving state. This process is called reconciliation. While reconciliation is in process, `observed_generation`, `latest_ready_revison`, `traffic_statuses`, and `uri` will have transient values that might mismatch the intended state: Once reconciliation is over (and this field is false), there are two possible outcomes: reconciliation succeeded and the serving state matches the Service, or there was an error, and reconciliation failed. This state can be found in `terminal_condition.state`. If reconciliation succeeded, the following fields will match: `traffic` and `traffic_statuses`, `observed_generation` and `generation`, `latest_ready_revision` and `latest_created_revision`. If reconciliation failed, `traffic_statuses`, `observed_generation`, and `latest_ready_revision` will have the state of the last serving revision, or empty for newly created Services. Additional information on the failure can be found in `terminal_condition` and `conditions`. "satisfiesPzs": True or False, # Output only. Reserved for future use. "scaling": { # Scaling settings applied at the service level rather than at the revision level. # Optional. Specifies service-level scaling settings - "minInstanceCount": 42, # Optional. total min instances for the service. This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving. (BETA) + "minInstanceCount": 42, # Optional. total min instances for the service. This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving. "scalingMode": "A String", # Optional. The scaling mode for the service. 
}, "template": { # RevisionTemplate describes the data a revision should have when created from a template. # Required. The template used to create revisions for this Service. @@ -812,6 +814,7 @@

Method Details

"expireTime": "A String", # Output only. For a deleted resource, the time after which it will be permamently deleted. "generation": "A String", # Output only. A number that monotonically increases every time the user modifies the desired state. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a `string` instead of an `integer`. "ingress": "A String", # Optional. Provides the ingress settings for this Service. On output, returns the currently observed ingress settings, or INGRESS_TRAFFIC_UNSPECIFIED if no revision is active. + "invokerIamDisabled": True or False, # Optional. Disables IAM permission check for run.routes.invoke for callers of this service. This setting should not be used with external ingress. "labels": { # Optional. Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 Service. "a_key": "A String", }, @@ -824,7 +827,7 @@

Method Details

"reconciling": True or False, # Output only. Returns true if the Service is currently being acted upon by the system to bring it into the desired state. When a new Service is created, or an existing one is updated, Cloud Run will asynchronously perform all necessary steps to bring the Service to the desired serving state. This process is called reconciliation. While reconciliation is in process, `observed_generation`, `latest_ready_revison`, `traffic_statuses`, and `uri` will have transient values that might mismatch the intended state: Once reconciliation is over (and this field is false), there are two possible outcomes: reconciliation succeeded and the serving state matches the Service, or there was an error, and reconciliation failed. This state can be found in `terminal_condition.state`. If reconciliation succeeded, the following fields will match: `traffic` and `traffic_statuses`, `observed_generation` and `generation`, `latest_ready_revision` and `latest_created_revision`. If reconciliation failed, `traffic_statuses`, `observed_generation`, and `latest_ready_revision` will have the state of the last serving revision, or empty for newly created Services. Additional information on the failure can be found in `terminal_condition` and `conditions`. "satisfiesPzs": True or False, # Output only. Reserved for future use. "scaling": { # Scaling settings applied at the service level rather than at the revision level. # Optional. Specifies service-level scaling settings - "minInstanceCount": 42, # Optional. total min instances for the service. This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving. (BETA) + "minInstanceCount": 42, # Optional. total min instances for the service. This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving. "scalingMode": "A String", # Optional. The scaling mode for the service. 
}, "template": { # RevisionTemplate describes the data a revision should have when created from a template. # Required. The template used to create revisions for this Service. @@ -1089,6 +1092,7 @@

Method Details

"expireTime": "A String", # Output only. For a deleted resource, the time after which it will be permamently deleted. "generation": "A String", # Output only. A number that monotonically increases every time the user modifies the desired state. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a `string` instead of an `integer`. "ingress": "A String", # Optional. Provides the ingress settings for this Service. On output, returns the currently observed ingress settings, or INGRESS_TRAFFIC_UNSPECIFIED if no revision is active. + "invokerIamDisabled": True or False, # Optional. Disables IAM permission check for run.routes.invoke for callers of this service. This setting should not be used with external ingress. "labels": { # Optional. Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 Service. "a_key": "A String", }, @@ -1101,7 +1105,7 @@

Method Details

"reconciling": True or False, # Output only. Returns true if the Service is currently being acted upon by the system to bring it into the desired state. When a new Service is created, or an existing one is updated, Cloud Run will asynchronously perform all necessary steps to bring the Service to the desired serving state. This process is called reconciliation. While reconciliation is in process, `observed_generation`, `latest_ready_revison`, `traffic_statuses`, and `uri` will have transient values that might mismatch the intended state: Once reconciliation is over (and this field is false), there are two possible outcomes: reconciliation succeeded and the serving state matches the Service, or there was an error, and reconciliation failed. This state can be found in `terminal_condition.state`. If reconciliation succeeded, the following fields will match: `traffic` and `traffic_statuses`, `observed_generation` and `generation`, `latest_ready_revision` and `latest_created_revision`. If reconciliation failed, `traffic_statuses`, `observed_generation`, and `latest_ready_revision` will have the state of the last serving revision, or empty for newly created Services. Additional information on the failure can be found in `terminal_condition` and `conditions`. "satisfiesPzs": True or False, # Output only. Reserved for future use. "scaling": { # Scaling settings applied at the service level rather than at the revision level. # Optional. Specifies service-level scaling settings - "minInstanceCount": 42, # Optional. total min instances for the service. This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving. (BETA) + "minInstanceCount": 42, # Optional. total min instances for the service. This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving. "scalingMode": "A String", # Optional. The scaling mode for the service. 
}, "template": { # RevisionTemplate describes the data a revision should have when created from a template. # Required. The template used to create revisions for this Service. diff --git a/docs/dyn/searchads360_v0.customers.searchAds360.html b/docs/dyn/searchads360_v0.customers.searchAds360.html index 5a32677d2d5..bcdb2844fe3 100644 --- a/docs/dyn/searchads360_v0.customers.searchAds360.html +++ b/docs/dyn/searchads360_v0.customers.searchAds360.html @@ -185,6 +185,9 @@

Method Details

"adRotationMode": "A String", # The ad rotation mode of the ad group. "cpcBidMicros": "A String", # The maximum CPC (cost-per-click) bid. "creationTime": "A String", # Output only. The timestamp when this ad_group was created. The timestamp is in the customer's time zone and in "yyyy-MM-dd HH:mm:ss" format. + "effectiveLabels": [ # Output only. The resource names of effective labels attached to this ad group. An effective label is a label inherited or directly assigned to this ad group. + "A String", + ], "endDate": "A String", # Output only. Date when the ad group ends serving ads. By default, the ad group ends on the ad group's end date. If this field is set, then the ad group ends at the end of the specified date in the customer's time zone. This field is only available for Microsoft Advertising and Facebook gateway accounts. Format: YYYY-MM-DD Example: 2019-03-14 "engineId": "A String", # Output only. ID of the ad group in the external engine account. This field is for non-Google Ads account only, for example, Yahoo Japan, Microsoft, Baidu etc. For Google Ads entity, use "ad_group.id" instead. "engineStatus": "A String", # Output only. The Engine Status for ad group. @@ -362,6 +365,12 @@

Method Details

"ownerCustomerId": "A String", # Output only. The ID of the Customer which owns the label. "resourceName": "A String", # Immutable. The resource name of the ad group criterion label. Ad group criterion label resource names have the form: `customers/{customer_id}/adGroupCriterionLabels/{ad_group_id}~{criterion_id}~{label_id}` }, + "adGroupEffectiveLabel": { # A relationship between an ad group and an effective label. An effective label is a label inherited or directly assigned to this ad group. # The ad group effective label referenced in the query. + "adGroup": "A String", # Immutable. The ad group to which the effective label is attached. + "label": "A String", # Immutable. The effective label assigned to the ad group. + "ownerCustomerId": "A String", # Output only. The ID of the Customer which owns the effective label. + "resourceName": "A String", # Immutable. The resource name of the ad group effective label. Ad group effective label resource names have the form: `customers/{customer_id}/adGroupEffectiveLabels/{ad_group_id}~{label_id}` + }, "adGroupLabel": { # A relationship between an ad group and a label. # The ad group label referenced in the query. "adGroup": "A String", # Immutable. The ad group to which the label is attached. "label": "A String", # Immutable. The label assigned to the ad group. @@ -654,10 +663,10 @@

Method Details

"adServingOptimizationStatus": "A String", # The ad serving optimization status of the campaign. "advertisingChannelSubType": "A String", # Immutable. Optional refinement to `advertising_channel_type`. Must be a valid sub-type of the parent channel type. Can be set only when creating campaigns. After campaign is created, the field can not be changed. "advertisingChannelType": "A String", # Immutable. The primary serving target for ads within the campaign. The targeting options can be refined in `network_settings`. This field is required and should not be empty when creating new campaigns. Can be set only when creating campaigns. After the campaign is created, the field can not be changed. - "biddingStrategy": "A String", # Portfolio bidding strategy used by campaign. + "biddingStrategy": "A String", # The resource name of the portfolio bidding strategy used by the campaign. "biddingStrategySystemStatus": "A String", # Output only. The system status of the campaign's bidding strategy. "biddingStrategyType": "A String", # Output only. The type of bidding strategy. A bidding strategy can be created by setting either the bidding scheme to create a standard bidding strategy or the `bidding_strategy` field to create a portfolio bidding strategy. This field is read-only. - "campaignBudget": "A String", # The budget of the campaign. + "campaignBudget": "A String", # The resource name of the campaign budget of the campaign. "createTime": "A String", # Output only. The timestamp when this campaign was created. The timestamp is in the customer's time zone and in "yyyy-MM-dd HH:mm:ss" format. create_time will be deprecated in v1. Use creation_time instead. "creationTime": "A String", # Output only. The timestamp when this campaign was created. The timestamp is in the customer's time zone and in "yyyy-MM-dd HH:mm:ss" format. "dynamicSearchAdsSetting": { # The setting for controlling Dynamic Search Ads (DSA). # The setting for controlling Dynamic Search Ads (DSA). 
@@ -665,6 +674,9 @@

Method Details

"languageCode": "A String", # Required. The language code specifying the language of the domain, for example, "en". "useSuppliedUrlsOnly": True or False, # Whether the campaign uses advertiser supplied URLs exclusively. }, + "effectiveLabels": [ # Output only. The resource names of effective labels attached to this campaign. An effective label is a label inherited or directly assigned to this campaign. + "A String", + ], "endDate": "A String", # The last day of the campaign in serving customer's timezone in YYYY-MM-DD format. On create, defaults to 2037-12-30, which means the campaign will run indefinitely. To set an existing campaign to run indefinitely, set this field to 2037-12-30. "engineId": "A String", # Output only. ID of the campaign in the external engine account. This field is for non-Google Ads account only, for example, Yahoo Japan, Microsoft, Baidu etc. For Google Ads entity, use "campaign.id" instead. "excludedParentAssetFieldTypes": [ # The asset field types that should be excluded from this campaign. Asset links with these field types will not be inherited by this campaign from the upper level. @@ -722,7 +734,7 @@

Method Details

}, "resourceName": "A String", # Immutable. The resource name of the campaign. Campaign resource names have the form: `customers/{customer_id}/campaigns/{campaign_id}` "selectiveOptimization": { # Selective optimization setting for this campaign, which includes a set of conversion actions to optimize this campaign towards. This feature only applies to app campaigns that use MULTI_CHANNEL as AdvertisingChannelType and APP_CAMPAIGN or APP_CAMPAIGN_FOR_ENGAGEMENT as AdvertisingChannelSubType. # Selective optimization setting for this campaign, which includes a set of conversion actions to optimize this campaign towards. This feature only applies to app campaigns that use MULTI_CHANNEL as AdvertisingChannelType and APP_CAMPAIGN or APP_CAMPAIGN_FOR_ENGAGEMENT as AdvertisingChannelSubType. - "conversionActions": [ # The selected set of conversion actions for optimizing this campaign. + "conversionActions": [ # The selected set of resource names for conversion actions for optimizing this campaign. "A String", ], }, @@ -844,6 +856,12 @@

Method Details

"criterionName": "A String", # The name of the criterion that is defined by this parameter. The name value will be used for identifying, sorting and filtering criteria with this type of parameters. This field is required for CREATE operations and is prohibited on UPDATE operations. }, }, + "campaignEffectiveLabel": { # Represents a relationship between a campaign and an effective label. An effective label is a label inherited or directly assigned to this campaign. # The campaign effective label referenced in the query. + "campaign": "A String", # Immutable. The campaign to which the effective label is attached. + "label": "A String", # Immutable. The effective label assigned to the campaign. + "ownerCustomerId": "A String", # Output only. The ID of the Customer which owns the effective label. + "resourceName": "A String", # Immutable. Name of the resource. CampaignEffectivelabel resource names have the form: `customers/{customer_id}/campaignEffectiveLabels/{campaign_id}~{label_id}` + }, "campaignLabel": { # Represents a relationship between a campaign and a label. # The campaign label referenced in the query. "campaign": "A String", # Immutable. The campaign to which the label is attached. "label": "A String", # Immutable. The label assigned to the campaign. @@ -1095,12 +1113,16 @@

Method Details

"costPerConversion": 3.14, # Average conversion eligible cost per biddable conversion. "costPerCurrentModelAttributedConversion": 3.14, # The cost of ad interactions divided by current model attributed conversions. This only includes conversion actions which include_in_conversions_metric attribute is set to true. If you use conversion-based bidding, your bid strategies will optimize for these conversions. "crossDeviceConversions": 3.14, # Conversions from when a customer clicks on an ad on one device, then converts on a different device or browser. Cross-device conversions are already included in all_conversions. + "crossDeviceConversionsByConversionDate": 3.14, # The number of cross-device conversions by conversion date. Details for the by_conversion_date columns are available at https://support.google.com/sa360/answer/9250611. "crossDeviceConversionsValue": 3.14, # The sum of the value of cross-device conversions. + "crossDeviceConversionsValueByConversionDate": 3.14, # The sum of cross-device conversions value by conversion date. Details for the by_conversion_date columns are available at https://support.google.com/sa360/answer/9250611. "crossSellCostOfGoodsSoldMicros": "A String", # Cross-sell cost of goods sold (COGS) is the total cost of products sold as a result of advertising a different product. How it works: You report conversions with cart data for completed purchases on your website. If the ad that was interacted with before the purchase has an associated product (see Shopping Ads) then this product is considered the advertised product. Any product included in the order the customer places is a sold product. If these products don't match then this is considered cross-sell. Cross-sell cost of goods sold is the total cost of the products sold that weren't advertised. Example: Someone clicked on a Shopping ad for a hat then bought the same hat and a shirt. The hat has a cost of goods sold value of $3, the shirt has a cost of goods sold value of $5. 
The cross-sell cost of goods sold for this order is $5. This metric is only available if you report conversions with cart data. This metric is a monetary value and returned in the customer's currency by default. See the metrics_currency parameter at https://developers.google.com/search-ads/reporting/query/query-structure#parameters_clause "crossSellGrossProfitMicros": "A String", # Cross-sell gross profit is the profit you made from products sold as a result of advertising a different product, minus cost of goods sold (COGS). How it works: You report conversions with cart data for completed purchases on your website. If the ad that was interacted with before the purchase has an associated product (see Shopping Ads) then this product is considered the advertised product. Any product included in the purchase is a sold product. If these products don't match then this is considered cross-sell. Cross-sell gross profit is the revenue you made from cross-sell attributed to your ads minus the cost of the goods sold. Example: Someone clicked on a Shopping ad for a hat then bought the same hat and a shirt. The shirt is priced $20 and has a cost of goods sold value of $5. The cross-sell gross profit of this order is $15 = $20 - $5. This metric is only available if you report conversions with cart data. This metric is a monetary value and returned in the customer's currency by default. See the metrics_currency parameter at https://developers.google.com/search-ads/reporting/query/query-structure#parameters_clause "crossSellRevenueMicros": "A String", # Cross-sell revenue is the total amount you made from products sold as a result of advertising a different product. How it works: You report conversions with cart data for completed purchases on your website. If the ad that was interacted with before the purchase has an associated product (see Shopping Ads) then this product is considered the advertised product. 
Any product included in the order the customer places is a sold product. If these products don't match then this is considered cross-sell. Cross-sell revenue is the total value you made from cross-sell attributed to your ads. Example: Someone clicked on a Shopping ad for a hat then bought the same hat and a shirt. The hat is priced $10 and the shirt is priced $20. The cross-sell revenue of this order is $20. This metric is only available if you report conversions with cart data. This metric is a monetary value and returned in the customer's currency by default. See the metrics_currency parameter at https://developers.google.com/search-ads/reporting/query/query-structure#parameters_clause "crossSellUnitsSold": 3.14, # Cross-sell units sold is the total number of products sold as a result of advertising a different product. How it works: You report conversions with cart data for completed purchases on your website. If the ad that was interacted with before the purchase has an associated product (see Shopping Ads) then this product is considered the advertised product. Any product included in the order the customer places is a sold product. If these products don't match then this is considered cross-sell. Cross-sell units sold is the total number of cross-sold products from all orders attributed to your ads. Example: Someone clicked on a Shopping ad for a hat then bought the same hat, a shirt and a jacket. The cross-sell units sold in this order is 2. This metric is only available if you report conversions with cart data. "ctr": 3.14, # The number of clicks your ad receives (Clicks) divided by the number of times your ad is shown (Impressions). + "generalInvalidClickRate": 3.14, # The percentage of clicks that have been filtered out of your total number of clicks (filtered + non-filtered clicks) due to being general invalid clicks. 
These are clicks Google considers illegitimate that are detected through routine means of filtration (that is, known invalid data-center traffic, bots and spiders or other crawlers, irregular patterns, etc). You're not charged for them, and they don't affect your account statistics. See the help page at https://support.google.com/campaignmanager/answer/6076504 for details. + "generalInvalidClicks": "A String", # Number of general invalid clicks. These are a subset of your invalid clicks that are detected through routine means of filtration (such as known invalid data-center traffic, bots and spiders or other crawlers, irregular patterns, etc.). You're not charged for them, and they don't affect your account statistics. See the help page at https://support.google.com/campaignmanager/answer/6076504 for details. "historicalCreativeQualityScore": "A String", # The creative historical quality score. "historicalLandingPageQualityScore": "A String", # The quality of historical landing page experience. "historicalQualityScore": "A String", # The historical quality score. @@ -1313,6 +1335,9 @@

Method Details

"adRotationMode": "A String", # The ad rotation mode of the ad group. "cpcBidMicros": "A String", # The maximum CPC (cost-per-click) bid. "creationTime": "A String", # Output only. The timestamp when this ad_group was created. The timestamp is in the customer's time zone and in "yyyy-MM-dd HH:mm:ss" format. + "effectiveLabels": [ # Output only. The resource names of effective labels attached to this ad group. An effective label is a label inherited or directly assigned to this ad group. + "A String", + ], "endDate": "A String", # Output only. Date when the ad group ends serving ads. By default, the ad group ends on the ad group's end date. If this field is set, then the ad group ends at the end of the specified date in the customer's time zone. This field is only available for Microsoft Advertising and Facebook gateway accounts. Format: YYYY-MM-DD Example: 2019-03-14 "engineId": "A String", # Output only. ID of the ad group in the external engine account. This field is for non-Google Ads account only, for example, Yahoo Japan, Microsoft, Baidu etc. For Google Ads entity, use "ad_group.id" instead. "engineStatus": "A String", # Output only. The Engine Status for ad group. @@ -1490,6 +1515,12 @@

Method Details

"ownerCustomerId": "A String", # Output only. The ID of the Customer which owns the label. "resourceName": "A String", # Immutable. The resource name of the ad group criterion label. Ad group criterion label resource names have the form: `customers/{customer_id}/adGroupCriterionLabels/{ad_group_id}~{criterion_id}~{label_id}` }, + "adGroupEffectiveLabel": { # A relationship between an ad group and an effective label. An effective label is a label inherited or directly assigned to this ad group. # The ad group effective label referenced in the query. + "adGroup": "A String", # Immutable. The ad group to which the effective label is attached. + "label": "A String", # Immutable. The effective label assigned to the ad group. + "ownerCustomerId": "A String", # Output only. The ID of the Customer which owns the effective label. + "resourceName": "A String", # Immutable. The resource name of the ad group effective label. Ad group effective label resource names have the form: `customers/{customer_id}/adGroupEffectiveLabels/{ad_group_id}~{label_id}` + }, "adGroupLabel": { # A relationship between an ad group and a label. # The ad group label referenced in the query. "adGroup": "A String", # Immutable. The ad group to which the label is attached. "label": "A String", # Immutable. The label assigned to the ad group. @@ -1782,10 +1813,10 @@

Method Details

"adServingOptimizationStatus": "A String", # The ad serving optimization status of the campaign. "advertisingChannelSubType": "A String", # Immutable. Optional refinement to `advertising_channel_type`. Must be a valid sub-type of the parent channel type. Can be set only when creating campaigns. After campaign is created, the field can not be changed. "advertisingChannelType": "A String", # Immutable. The primary serving target for ads within the campaign. The targeting options can be refined in `network_settings`. This field is required and should not be empty when creating new campaigns. Can be set only when creating campaigns. After the campaign is created, the field can not be changed. - "biddingStrategy": "A String", # Portfolio bidding strategy used by campaign. + "biddingStrategy": "A String", # The resource name of the portfolio bidding strategy used by the campaign. "biddingStrategySystemStatus": "A String", # Output only. The system status of the campaign's bidding strategy. "biddingStrategyType": "A String", # Output only. The type of bidding strategy. A bidding strategy can be created by setting either the bidding scheme to create a standard bidding strategy or the `bidding_strategy` field to create a portfolio bidding strategy. This field is read-only. - "campaignBudget": "A String", # The budget of the campaign. + "campaignBudget": "A String", # The resource name of the campaign budget of the campaign. "createTime": "A String", # Output only. The timestamp when this campaign was created. The timestamp is in the customer's time zone and in "yyyy-MM-dd HH:mm:ss" format. create_time will be deprecated in v1. Use creation_time instead. "creationTime": "A String", # Output only. The timestamp when this campaign was created. The timestamp is in the customer's time zone and in "yyyy-MM-dd HH:mm:ss" format. "dynamicSearchAdsSetting": { # The setting for controlling Dynamic Search Ads (DSA). # The setting for controlling Dynamic Search Ads (DSA). 
@@ -1793,6 +1824,9 @@

Method Details

"languageCode": "A String", # Required. The language code specifying the language of the domain, for example, "en". "useSuppliedUrlsOnly": True or False, # Whether the campaign uses advertiser supplied URLs exclusively. }, + "effectiveLabels": [ # Output only. The resource names of effective labels attached to this campaign. An effective label is a label inherited or directly assigned to this campaign. + "A String", + ], "endDate": "A String", # The last day of the campaign in serving customer's timezone in YYYY-MM-DD format. On create, defaults to 2037-12-30, which means the campaign will run indefinitely. To set an existing campaign to run indefinitely, set this field to 2037-12-30. "engineId": "A String", # Output only. ID of the campaign in the external engine account. This field is for non-Google Ads account only, for example, Yahoo Japan, Microsoft, Baidu etc. For Google Ads entity, use "campaign.id" instead. "excludedParentAssetFieldTypes": [ # The asset field types that should be excluded from this campaign. Asset links with these field types will not be inherited by this campaign from the upper level. @@ -1850,7 +1884,7 @@

Method Details

}, "resourceName": "A String", # Immutable. The resource name of the campaign. Campaign resource names have the form: `customers/{customer_id}/campaigns/{campaign_id}` "selectiveOptimization": { # Selective optimization setting for this campaign, which includes a set of conversion actions to optimize this campaign towards. This feature only applies to app campaigns that use MULTI_CHANNEL as AdvertisingChannelType and APP_CAMPAIGN or APP_CAMPAIGN_FOR_ENGAGEMENT as AdvertisingChannelSubType. # Selective optimization setting for this campaign, which includes a set of conversion actions to optimize this campaign towards. This feature only applies to app campaigns that use MULTI_CHANNEL as AdvertisingChannelType and APP_CAMPAIGN or APP_CAMPAIGN_FOR_ENGAGEMENT as AdvertisingChannelSubType. - "conversionActions": [ # The selected set of conversion actions for optimizing this campaign. + "conversionActions": [ # The selected set of resource names for conversion actions for optimizing this campaign. "A String", ], }, @@ -1972,6 +2006,12 @@

Method Details

"criterionName": "A String", # The name of the criterion that is defined by this parameter. The name value will be used for identifying, sorting and filtering criteria with this type of parameters. This field is required for CREATE operations and is prohibited on UPDATE operations. }, }, + "campaignEffectiveLabel": { # Represents a relationship between a campaign and an effective label. An effective label is a label inherited or directly assigned to this campaign. # The campaign effective label referenced in the query. + "campaign": "A String", # Immutable. The campaign to which the effective label is attached. + "label": "A String", # Immutable. The effective label assigned to the campaign. + "ownerCustomerId": "A String", # Output only. The ID of the Customer which owns the effective label. + "resourceName": "A String", # Immutable. Name of the resource. CampaignEffectivelabel resource names have the form: `customers/{customer_id}/campaignEffectiveLabels/{campaign_id}~{label_id}` + }, "campaignLabel": { # Represents a relationship between a campaign and a label. # The campaign label referenced in the query. "campaign": "A String", # Immutable. The campaign to which the label is attached. "label": "A String", # Immutable. The label assigned to the campaign. @@ -2223,12 +2263,16 @@

Method Details

"costPerConversion": 3.14, # Average conversion eligible cost per biddable conversion. "costPerCurrentModelAttributedConversion": 3.14, # The cost of ad interactions divided by current model attributed conversions. This only includes conversion actions which include_in_conversions_metric attribute is set to true. If you use conversion-based bidding, your bid strategies will optimize for these conversions. "crossDeviceConversions": 3.14, # Conversions from when a customer clicks on an ad on one device, then converts on a different device or browser. Cross-device conversions are already included in all_conversions. + "crossDeviceConversionsByConversionDate": 3.14, # The number of cross-device conversions by conversion date. Details for the by_conversion_date columns are available at https://support.google.com/sa360/answer/9250611. "crossDeviceConversionsValue": 3.14, # The sum of the value of cross-device conversions. + "crossDeviceConversionsValueByConversionDate": 3.14, # The sum of cross-device conversions value by conversion date. Details for the by_conversion_date columns are available at https://support.google.com/sa360/answer/9250611. "crossSellCostOfGoodsSoldMicros": "A String", # Cross-sell cost of goods sold (COGS) is the total cost of products sold as a result of advertising a different product. How it works: You report conversions with cart data for completed purchases on your website. If the ad that was interacted with before the purchase has an associated product (see Shopping Ads) then this product is considered the advertised product. Any product included in the order the customer places is a sold product. If these products don't match then this is considered cross-sell. Cross-sell cost of goods sold is the total cost of the products sold that weren't advertised. Example: Someone clicked on a Shopping ad for a hat then bought the same hat and a shirt. The hat has a cost of goods sold value of $3, the shirt has a cost of goods sold value of $5. 
The cross-sell cost of goods sold for this order is $5. This metric is only available if you report conversions with cart data. This metric is a monetary value and returned in the customer's currency by default. See the metrics_currency parameter at https://developers.google.com/search-ads/reporting/query/query-structure#parameters_clause "crossSellGrossProfitMicros": "A String", # Cross-sell gross profit is the profit you made from products sold as a result of advertising a different product, minus cost of goods sold (COGS). How it works: You report conversions with cart data for completed purchases on your website. If the ad that was interacted with before the purchase has an associated product (see Shopping Ads) then this product is considered the advertised product. Any product included in the purchase is a sold product. If these products don't match then this is considered cross-sell. Cross-sell gross profit is the revenue you made from cross-sell attributed to your ads minus the cost of the goods sold. Example: Someone clicked on a Shopping ad for a hat then bought the same hat and a shirt. The shirt is priced $20 and has a cost of goods sold value of $5. The cross-sell gross profit of this order is $15 = $20 - $5. This metric is only available if you report conversions with cart data. This metric is a monetary value and returned in the customer's currency by default. See the metrics_currency parameter at https://developers.google.com/search-ads/reporting/query/query-structure#parameters_clause "crossSellRevenueMicros": "A String", # Cross-sell revenue is the total amount you made from products sold as a result of advertising a different product. How it works: You report conversions with cart data for completed purchases on your website. If the ad that was interacted with before the purchase has an associated product (see Shopping Ads) then this product is considered the advertised product. 
Any product included in the order the customer places is a sold product. If these products don't match then this is considered cross-sell. Cross-sell revenue is the total value you made from cross-sell attributed to your ads. Example: Someone clicked on a Shopping ad for a hat then bought the same hat and a shirt. The hat is priced $10 and the shirt is priced $20. The cross-sell revenue of this order is $20. This metric is only available if you report conversions with cart data. This metric is a monetary value and returned in the customer's currency by default. See the metrics_currency parameter at https://developers.google.com/search-ads/reporting/query/query-structure#parameters_clause "crossSellUnitsSold": 3.14, # Cross-sell units sold is the total number of products sold as a result of advertising a different product. How it works: You report conversions with cart data for completed purchases on your website. If the ad that was interacted with before the purchase has an associated product (see Shopping Ads) then this product is considered the advertised product. Any product included in the order the customer places is a sold product. If these products don't match then this is considered cross-sell. Cross-sell units sold is the total number of cross-sold products from all orders attributed to your ads. Example: Someone clicked on a Shopping ad for a hat then bought the same hat, a shirt and a jacket. The cross-sell units sold in this order is 2. This metric is only available if you report conversions with cart data. "ctr": 3.14, # The number of clicks your ad receives (Clicks) divided by the number of times your ad is shown (Impressions). + "generalInvalidClickRate": 3.14, # The percentage of clicks that have been filtered out of your total number of clicks (filtered + non-filtered clicks) due to being general invalid clicks. 
These are clicks Google considers illegitimate that are detected through routine means of filtration (that is, known invalid data-center traffic, bots and spiders or other crawlers, irregular patterns, etc). You're not charged for them, and they don't affect your account statistics. See the help page at https://support.google.com/campaignmanager/answer/6076504 for details. + "generalInvalidClicks": "A String", # Number of general invalid clicks. These are a subset of your invalid clicks that are detected through routine means of filtration (such as known invalid data-center traffic, bots and spiders or other crawlers, irregular patterns, etc.). You're not charged for them, and they don't affect your account statistics. See the help page at https://support.google.com/campaignmanager/answer/6076504 for details. "historicalCreativeQualityScore": "A String", # The creative historical quality score. "historicalLandingPageQualityScore": "A String", # The quality of historical landing page experience. "historicalQualityScore": "A String", # The historical quality score. diff --git a/docs/dyn/securitycenter_v1.folders.locations.muteConfigs.html b/docs/dyn/securitycenter_v1.folders.locations.muteConfigs.html index d289e1a4480..e9f6014a475 100644 --- a/docs/dyn/securitycenter_v1.folders.locations.muteConfigs.html +++ b/docs/dyn/securitycenter_v1.folders.locations.muteConfigs.html @@ -77,21 +77,12 @@

Instance Methods

close()

Close httplib2 connections.

-

- create(parent, body=None, muteConfigId=None, x__xgafv=None)

-

Creates a mute config.

delete(name, x__xgafv=None)

Deletes an existing mute config.

get(name, x__xgafv=None)

Gets a mute config.

-

- list(parent, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists mute configs.

-

- list_next()

-

Retrieves the next page of results.

patch(name, body=None, updateMask=None, x__xgafv=None)

Updates a mute config.

@@ -101,49 +92,6 @@

Method Details

Close httplib2 connections.
-
- create(parent, body=None, muteConfigId=None, x__xgafv=None) -
Creates a mute config.
-
-Args:
-  parent: string, Required. Resource name of the new mute configs's parent. Its format is `organizations/[organization_id]`, `folders/[folder_id]`, or `projects/[project_id]`. (required)
-  body: object, The request body.
-    The object takes the form of:
-
-{ # A mute config is a Cloud SCC resource that contains the configuration to mute create/update events of findings.
-  "createTime": "A String", # Output only. The time at which the mute config was created. This field is set by the server and will be ignored if provided on config creation.
-  "description": "A String", # A description of the mute config.
-  "displayName": "A String", # The human readable name to be displayed for the mute config.
-  "expiryTime": "A String", # Optional. The expiry of the mute config. Only applicable for dynamic configs. If the expiry is set, when the config expires, it is removed from all findings.
-  "filter": "A String", # Required. An expression that defines the filter to apply across create/update events of findings. While creating a filter string, be mindful of the scope in which the mute configuration is being created. E.g., If a filter contains project = X but is created under the project = Y scope, it might not match any findings. The following field and operator combinations are supported: * severity: `=`, `:` * category: `=`, `:` * resource.name: `=`, `:` * resource.project_name: `=`, `:` * resource.project_display_name: `=`, `:` * resource.folders.resource_folder: `=`, `:` * resource.parent_name: `=`, `:` * resource.parent_display_name: `=`, `:` * resource.type: `=`, `:` * finding_class: `=`, `:` * indicator.ip_addresses: `=`, `:` * indicator.domains: `=`, `:`
-  "mostRecentEditor": "A String", # Output only. Email address of the user who last edited the mute config. This field is set by the server and will be ignored if provided on config creation or update.
-  "name": "A String", # This field will be ignored if provided on config creation. Format `organizations/{organization}/muteConfigs/{mute_config}` `folders/{folder}/muteConfigs/{mute_config}` `projects/{project}/muteConfigs/{mute_config}` `organizations/{organization}/locations/global/muteConfigs/{mute_config}` `folders/{folder}/locations/global/muteConfigs/{mute_config}` `projects/{project}/locations/global/muteConfigs/{mute_config}`
-  "type": "A String", # Optional. The type of the mute config, which determines what type of mute state the config affects. The static mute state takes precedence over the dynamic mute state. Immutable after creation. STATIC by default if not set during creation.
-  "updateTime": "A String", # Output only. The most recent time at which the mute config was updated. This field is set by the server and will be ignored if provided on config creation or update.
-}
-
-  muteConfigId: string, Required. Unique identifier provided by the client within the parent scope. It must consist of only lowercase letters, numbers, and hyphens, must start with a letter, must end with either a letter or a number, and must be 63 characters or less.
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # A mute config is a Cloud SCC resource that contains the configuration to mute create/update events of findings.
-  "createTime": "A String", # Output only. The time at which the mute config was created. This field is set by the server and will be ignored if provided on config creation.
-  "description": "A String", # A description of the mute config.
-  "displayName": "A String", # The human readable name to be displayed for the mute config.
-  "expiryTime": "A String", # Optional. The expiry of the mute config. Only applicable for dynamic configs. If the expiry is set, when the config expires, it is removed from all findings.
-  "filter": "A String", # Required. An expression that defines the filter to apply across create/update events of findings. While creating a filter string, be mindful of the scope in which the mute configuration is being created. E.g., If a filter contains project = X but is created under the project = Y scope, it might not match any findings. The following field and operator combinations are supported: * severity: `=`, `:` * category: `=`, `:` * resource.name: `=`, `:` * resource.project_name: `=`, `:` * resource.project_display_name: `=`, `:` * resource.folders.resource_folder: `=`, `:` * resource.parent_name: `=`, `:` * resource.parent_display_name: `=`, `:` * resource.type: `=`, `:` * finding_class: `=`, `:` * indicator.ip_addresses: `=`, `:` * indicator.domains: `=`, `:`
-  "mostRecentEditor": "A String", # Output only. Email address of the user who last edited the mute config. This field is set by the server and will be ignored if provided on config creation or update.
-  "name": "A String", # This field will be ignored if provided on config creation. Format `organizations/{organization}/muteConfigs/{mute_config}` `folders/{folder}/muteConfigs/{mute_config}` `projects/{project}/muteConfigs/{mute_config}` `organizations/{organization}/locations/global/muteConfigs/{mute_config}` `folders/{folder}/locations/global/muteConfigs/{mute_config}` `projects/{project}/locations/global/muteConfigs/{mute_config}`
-  "type": "A String", # Optional. The type of the mute config, which determines what type of mute state the config affects. The static mute state takes precedence over the dynamic mute state. Immutable after creation. STATIC by default if not set during creation.
-  "updateTime": "A String", # Output only. The most recent time at which the mute config was updated. This field is set by the server and will be ignored if provided on config creation or update.
-}
-
-
delete(name, x__xgafv=None)
Deletes an existing mute config.
@@ -189,54 +137,6 @@ 

Method Details

}
-
- list(parent, pageSize=None, pageToken=None, x__xgafv=None) -
Lists mute configs.
-
-Args:
-  parent: string, Required. The parent, which owns the collection of mute configs. Its format is `organizations/[organization_id]`, `folders/[folder_id]`, `projects/[project_id]`. (required)
-  pageSize: integer, The maximum number of configs to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
-  pageToken: string, A page token, received from a previous `ListMuteConfigs` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListMuteConfigs` must match the call that provided the page token.
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # Response message for listing mute configs.
-  "muteConfigs": [ # The mute configs from the specified parent.
-    { # A mute config is a Cloud SCC resource that contains the configuration to mute create/update events of findings.
-      "createTime": "A String", # Output only. The time at which the mute config was created. This field is set by the server and will be ignored if provided on config creation.
-      "description": "A String", # A description of the mute config.
-      "displayName": "A String", # The human readable name to be displayed for the mute config.
-      "expiryTime": "A String", # Optional. The expiry of the mute config. Only applicable for dynamic configs. If the expiry is set, when the config expires, it is removed from all findings.
-      "filter": "A String", # Required. An expression that defines the filter to apply across create/update events of findings. While creating a filter string, be mindful of the scope in which the mute configuration is being created. E.g., If a filter contains project = X but is created under the project = Y scope, it might not match any findings. The following field and operator combinations are supported: * severity: `=`, `:` * category: `=`, `:` * resource.name: `=`, `:` * resource.project_name: `=`, `:` * resource.project_display_name: `=`, `:` * resource.folders.resource_folder: `=`, `:` * resource.parent_name: `=`, `:` * resource.parent_display_name: `=`, `:` * resource.type: `=`, `:` * finding_class: `=`, `:` * indicator.ip_addresses: `=`, `:` * indicator.domains: `=`, `:`
-      "mostRecentEditor": "A String", # Output only. Email address of the user who last edited the mute config. This field is set by the server and will be ignored if provided on config creation or update.
-      "name": "A String", # This field will be ignored if provided on config creation. Format `organizations/{organization}/muteConfigs/{mute_config}` `folders/{folder}/muteConfigs/{mute_config}` `projects/{project}/muteConfigs/{mute_config}` `organizations/{organization}/locations/global/muteConfigs/{mute_config}` `folders/{folder}/locations/global/muteConfigs/{mute_config}` `projects/{project}/locations/global/muteConfigs/{mute_config}`
-      "type": "A String", # Optional. The type of the mute config, which determines what type of mute state the config affects. The static mute state takes precedence over the dynamic mute state. Immutable after creation. STATIC by default if not set during creation.
-      "updateTime": "A String", # Output only. The most recent time at which the mute config was updated. This field is set by the server and will be ignored if provided on config creation or update.
-    },
-  ],
-  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
-}
-
- -
- list_next() -
Retrieves the next page of results.
-
-        Args:
-          previous_request: The request for the previous page. (required)
-          previous_response: The response from the request for the previous page. (required)
-
-        Returns:
-          A request object that you can call 'execute()' on to request the next
-          page. Returns None if there are no more items in the collection.
-        
-
-
patch(name, body=None, updateMask=None, x__xgafv=None)
Updates a mute config.
diff --git a/docs/dyn/securitycenter_v1.folders.securityHealthAnalyticsSettings.customModules.html b/docs/dyn/securitycenter_v1.folders.securityHealthAnalyticsSettings.customModules.html
index 3dbd910c5fb..16639b95a7f 100644
--- a/docs/dyn/securitycenter_v1.folders.securityHealthAnalyticsSettings.customModules.html
+++ b/docs/dyn/securitycenter_v1.folders.securityHealthAnalyticsSettings.customModules.html
@@ -122,6 +122,9 @@ 

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -168,6 +171,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -239,6 +245,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -297,6 +306,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -357,6 +369,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -435,6 +450,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -482,6 +500,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -529,6 +550,9 @@

Method Details

{ # Request message to simulate a CustomConfig against a given test resource. Maximum size of the request is 4 MB by default. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # Required. The custom configuration that you need to test. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. 
# Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. diff --git a/docs/dyn/securitycenter_v1.folders.securityHealthAnalyticsSettings.effectiveCustomModules.html b/docs/dyn/securitycenter_v1.folders.securityHealthAnalyticsSettings.effectiveCustomModules.html index 927f071ff63..28488895056 100644 --- a/docs/dyn/securitycenter_v1.folders.securityHealthAnalyticsSettings.effectiveCustomModules.html +++ b/docs/dyn/securitycenter_v1.folders.securityHealthAnalyticsSettings.effectiveCustomModules.html @@ -108,6 +108,9 @@

Method Details

{ # An EffectiveSecurityHealthAnalyticsCustomModule is the representation of a Security Health Analytics custom module at a specified level of the resource hierarchy: organization, folder, or project. If a custom module is inherited from a parent organization or folder, the value of the `enablementState` property in EffectiveSecurityHealthAnalyticsCustomModule is set to the value that is effective in the parent, instead of `INHERITED`. For example, if the module is enabled in a parent organization or folder, the effective enablement_state for the module in all child folders or projects is also `enabled`. EffectiveSecurityHealthAnalyticsCustomModule is read-only. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # Output only. The user-specified configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -162,6 +165,9 @@

Method Details

"effectiveSecurityHealthAnalyticsCustomModules": [ # Effective custom modules belonging to the requested parent. { # An EffectiveSecurityHealthAnalyticsCustomModule is the representation of a Security Health Analytics custom module at a specified level of the resource hierarchy: organization, folder, or project. If a custom module is inherited from a parent organization or folder, the value of the `enablementState` property in EffectiveSecurityHealthAnalyticsCustomModule is set to the value that is effective in the parent, instead of `INHERITED`. For example, if the module is enabled in a parent organization or folder, the effective enablement_state for the module in all child folders or projects is also `enabled`. EffectiveSecurityHealthAnalyticsCustomModule is read-only. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # Output only. The user-specified configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. diff --git a/docs/dyn/securitycenter_v1.folders.sources.findings.html b/docs/dyn/securitycenter_v1.folders.sources.findings.html index 9d76cefa149..ebc5544a298 100644 --- a/docs/dyn/securitycenter_v1.folders.sources.findings.html +++ b/docs/dyn/securitycenter_v1.folders.sources.findings.html @@ -782,6 +782,7 @@

Method Details

}, ], "resourceGroup": { # Represents an Azure resource group. # The Azure resource group associated with the resource. + "id": "A String", # The ID of the Azure resource group. "name": "A String", # The name of the Azure resource group. This is not a UUID. }, "subscription": { # Represents an Azure subscription. # The Azure subscription associated with the resource. diff --git a/docs/dyn/securitycenter_v1.organizations.locations.muteConfigs.html b/docs/dyn/securitycenter_v1.organizations.locations.muteConfigs.html index d22bb158a59..0a43db42c3a 100644 --- a/docs/dyn/securitycenter_v1.organizations.locations.muteConfigs.html +++ b/docs/dyn/securitycenter_v1.organizations.locations.muteConfigs.html @@ -77,21 +77,12 @@

Instance Methods

close()

Close httplib2 connections.

-

- create(parent, body=None, muteConfigId=None, x__xgafv=None)

-

Creates a mute config.

delete(name, x__xgafv=None)

Deletes an existing mute config.

get(name, x__xgafv=None)

Gets a mute config.

-

- list(parent, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists mute configs.

-

- list_next()

-

Retrieves the next page of results.

patch(name, body=None, updateMask=None, x__xgafv=None)

Updates a mute config.

@@ -101,49 +92,6 @@

Method Details

Close httplib2 connections.
-
- create(parent, body=None, muteConfigId=None, x__xgafv=None) -
Creates a mute config.
-
-Args:
-  parent: string, Required. Resource name of the new mute configs's parent. Its format is `organizations/[organization_id]`, `folders/[folder_id]`, or `projects/[project_id]`. (required)
-  body: object, The request body.
-    The object takes the form of:
-
-{ # A mute config is a Cloud SCC resource that contains the configuration to mute create/update events of findings.
-  "createTime": "A String", # Output only. The time at which the mute config was created. This field is set by the server and will be ignored if provided on config creation.
-  "description": "A String", # A description of the mute config.
-  "displayName": "A String", # The human readable name to be displayed for the mute config.
-  "expiryTime": "A String", # Optional. The expiry of the mute config. Only applicable for dynamic configs. If the expiry is set, when the config expires, it is removed from all findings.
-  "filter": "A String", # Required. An expression that defines the filter to apply across create/update events of findings. While creating a filter string, be mindful of the scope in which the mute configuration is being created. E.g., If a filter contains project = X but is created under the project = Y scope, it might not match any findings. The following field and operator combinations are supported: * severity: `=`, `:` * category: `=`, `:` * resource.name: `=`, `:` * resource.project_name: `=`, `:` * resource.project_display_name: `=`, `:` * resource.folders.resource_folder: `=`, `:` * resource.parent_name: `=`, `:` * resource.parent_display_name: `=`, `:` * resource.type: `=`, `:` * finding_class: `=`, `:` * indicator.ip_addresses: `=`, `:` * indicator.domains: `=`, `:`
-  "mostRecentEditor": "A String", # Output only. Email address of the user who last edited the mute config. This field is set by the server and will be ignored if provided on config creation or update.
-  "name": "A String", # This field will be ignored if provided on config creation. Format `organizations/{organization}/muteConfigs/{mute_config}` `folders/{folder}/muteConfigs/{mute_config}` `projects/{project}/muteConfigs/{mute_config}` `organizations/{organization}/locations/global/muteConfigs/{mute_config}` `folders/{folder}/locations/global/muteConfigs/{mute_config}` `projects/{project}/locations/global/muteConfigs/{mute_config}`
-  "type": "A String", # Optional. The type of the mute config, which determines what type of mute state the config affects. The static mute state takes precedence over the dynamic mute state. Immutable after creation. STATIC by default if not set during creation.
-  "updateTime": "A String", # Output only. The most recent time at which the mute config was updated. This field is set by the server and will be ignored if provided on config creation or update.
-}
-
-  muteConfigId: string, Required. Unique identifier provided by the client within the parent scope. It must consist of only lowercase letters, numbers, and hyphens, must start with a letter, must end with either a letter or a number, and must be 63 characters or less.
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # A mute config is a Cloud SCC resource that contains the configuration to mute create/update events of findings.
-  "createTime": "A String", # Output only. The time at which the mute config was created. This field is set by the server and will be ignored if provided on config creation.
-  "description": "A String", # A description of the mute config.
-  "displayName": "A String", # The human readable name to be displayed for the mute config.
-  "expiryTime": "A String", # Optional. The expiry of the mute config. Only applicable for dynamic configs. If the expiry is set, when the config expires, it is removed from all findings.
-  "filter": "A String", # Required. An expression that defines the filter to apply across create/update events of findings. While creating a filter string, be mindful of the scope in which the mute configuration is being created. E.g., If a filter contains project = X but is created under the project = Y scope, it might not match any findings. The following field and operator combinations are supported: * severity: `=`, `:` * category: `=`, `:` * resource.name: `=`, `:` * resource.project_name: `=`, `:` * resource.project_display_name: `=`, `:` * resource.folders.resource_folder: `=`, `:` * resource.parent_name: `=`, `:` * resource.parent_display_name: `=`, `:` * resource.type: `=`, `:` * finding_class: `=`, `:` * indicator.ip_addresses: `=`, `:` * indicator.domains: `=`, `:`
-  "mostRecentEditor": "A String", # Output only. Email address of the user who last edited the mute config. This field is set by the server and will be ignored if provided on config creation or update.
-  "name": "A String", # This field will be ignored if provided on config creation. Format `organizations/{organization}/muteConfigs/{mute_config}` `folders/{folder}/muteConfigs/{mute_config}` `projects/{project}/muteConfigs/{mute_config}` `organizations/{organization}/locations/global/muteConfigs/{mute_config}` `folders/{folder}/locations/global/muteConfigs/{mute_config}` `projects/{project}/locations/global/muteConfigs/{mute_config}`
-  "type": "A String", # Optional. The type of the mute config, which determines what type of mute state the config affects. The static mute state takes precedence over the dynamic mute state. Immutable after creation. STATIC by default if not set during creation.
-  "updateTime": "A String", # Output only. The most recent time at which the mute config was updated. This field is set by the server and will be ignored if provided on config creation or update.
-}
-
-
delete(name, x__xgafv=None)
Deletes an existing mute config.
@@ -189,54 +137,6 @@ 

Method Details

}
-
- list(parent, pageSize=None, pageToken=None, x__xgafv=None) -
Lists mute configs.
-
-Args:
-  parent: string, Required. The parent, which owns the collection of mute configs. Its format is `organizations/[organization_id]`, `folders/[folder_id]`, `projects/[project_id]`. (required)
-  pageSize: integer, The maximum number of configs to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
-  pageToken: string, A page token, received from a previous `ListMuteConfigs` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListMuteConfigs` must match the call that provided the page token.
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # Response message for listing mute configs.
-  "muteConfigs": [ # The mute configs from the specified parent.
-    { # A mute config is a Cloud SCC resource that contains the configuration to mute create/update events of findings.
-      "createTime": "A String", # Output only. The time at which the mute config was created. This field is set by the server and will be ignored if provided on config creation.
-      "description": "A String", # A description of the mute config.
-      "displayName": "A String", # The human readable name to be displayed for the mute config.
-      "expiryTime": "A String", # Optional. The expiry of the mute config. Only applicable for dynamic configs. If the expiry is set, when the config expires, it is removed from all findings.
-      "filter": "A String", # Required. An expression that defines the filter to apply across create/update events of findings. While creating a filter string, be mindful of the scope in which the mute configuration is being created. E.g., If a filter contains project = X but is created under the project = Y scope, it might not match any findings. The following field and operator combinations are supported: * severity: `=`, `:` * category: `=`, `:` * resource.name: `=`, `:` * resource.project_name: `=`, `:` * resource.project_display_name: `=`, `:` * resource.folders.resource_folder: `=`, `:` * resource.parent_name: `=`, `:` * resource.parent_display_name: `=`, `:` * resource.type: `=`, `:` * finding_class: `=`, `:` * indicator.ip_addresses: `=`, `:` * indicator.domains: `=`, `:`
-      "mostRecentEditor": "A String", # Output only. Email address of the user who last edited the mute config. This field is set by the server and will be ignored if provided on config creation or update.
-      "name": "A String", # This field will be ignored if provided on config creation. Format `organizations/{organization}/muteConfigs/{mute_config}` `folders/{folder}/muteConfigs/{mute_config}` `projects/{project}/muteConfigs/{mute_config}` `organizations/{organization}/locations/global/muteConfigs/{mute_config}` `folders/{folder}/locations/global/muteConfigs/{mute_config}` `projects/{project}/locations/global/muteConfigs/{mute_config}`
-      "type": "A String", # Optional. The type of the mute config, which determines what type of mute state the config affects. The static mute state takes precedence over the dynamic mute state. Immutable after creation. STATIC by default if not set during creation.
-      "updateTime": "A String", # Output only. The most recent time at which the mute config was updated. This field is set by the server and will be ignored if provided on config creation or update.
-    },
-  ],
-  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
-}
-
- -
- list_next() -
Retrieves the next page of results.
-
-        Args:
-          previous_request: The request for the previous page. (required)
-          previous_response: The response from the request for the previous page. (required)
-
-        Returns:
-          A request object that you can call 'execute()' on to request the next
-          page. Returns None if there are no more items in the collection.
-        
-
-
patch(name, body=None, updateMask=None, x__xgafv=None)
Updates a mute config.
diff --git a/docs/dyn/securitycenter_v1.organizations.securityHealthAnalyticsSettings.customModules.html b/docs/dyn/securitycenter_v1.organizations.securityHealthAnalyticsSettings.customModules.html
index ac3419c5de8..7e30880e221 100644
--- a/docs/dyn/securitycenter_v1.organizations.securityHealthAnalyticsSettings.customModules.html
+++ b/docs/dyn/securitycenter_v1.organizations.securityHealthAnalyticsSettings.customModules.html
@@ -122,6 +122,9 @@ 

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -168,6 +171,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -239,6 +245,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -297,6 +306,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -357,6 +369,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -435,6 +450,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -482,6 +500,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -529,6 +550,9 @@

Method Details

{ # Request message to simulate a CustomConfig against a given test resource. Maximum size of the request is 4 MB by default. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # Required. The custom configuration that you need to test. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. 
# Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. diff --git a/docs/dyn/securitycenter_v1.organizations.securityHealthAnalyticsSettings.effectiveCustomModules.html b/docs/dyn/securitycenter_v1.organizations.securityHealthAnalyticsSettings.effectiveCustomModules.html index a94a6503ecd..479ddba7fa1 100644 --- a/docs/dyn/securitycenter_v1.organizations.securityHealthAnalyticsSettings.effectiveCustomModules.html +++ b/docs/dyn/securitycenter_v1.organizations.securityHealthAnalyticsSettings.effectiveCustomModules.html @@ -108,6 +108,9 @@

Method Details

{ # An EffectiveSecurityHealthAnalyticsCustomModule is the representation of a Security Health Analytics custom module at a specified level of the resource hierarchy: organization, folder, or project. If a custom module is inherited from a parent organization or folder, the value of the `enablementState` property in EffectiveSecurityHealthAnalyticsCustomModule is set to the value that is effective in the parent, instead of `INHERITED`. For example, if the module is enabled in a parent organization or folder, the effective enablement_state for the module in all child folders or projects is also `enabled`. EffectiveSecurityHealthAnalyticsCustomModule is read-only. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # Output only. The user-specified configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -162,6 +165,9 @@

Method Details

"effectiveSecurityHealthAnalyticsCustomModules": [ # Effective custom modules belonging to the requested parent. { # An EffectiveSecurityHealthAnalyticsCustomModule is the representation of a Security Health Analytics custom module at a specified level of the resource hierarchy: organization, folder, or project. If a custom module is inherited from a parent organization or folder, the value of the `enablementState` property in EffectiveSecurityHealthAnalyticsCustomModule is set to the value that is effective in the parent, instead of `INHERITED`. For example, if the module is enabled in a parent organization or folder, the effective enablement_state for the module in all child folders or projects is also `enabled`. EffectiveSecurityHealthAnalyticsCustomModule is read-only. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # Output only. The user-specified configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. diff --git a/docs/dyn/securitycenter_v1.organizations.sources.findings.html b/docs/dyn/securitycenter_v1.organizations.sources.findings.html index 9cec3e6379d..96267d6da77 100644 --- a/docs/dyn/securitycenter_v1.organizations.sources.findings.html +++ b/docs/dyn/securitycenter_v1.organizations.sources.findings.html @@ -1940,6 +1940,7 @@

Method Details

}, ], "resourceGroup": { # Represents an Azure resource group. # The Azure resource group associated with the resource. + "id": "A String", # The ID of the Azure resource group. "name": "A String", # The name of the Azure resource group. This is not a UUID. }, "subscription": { # Represents an Azure subscription. # The Azure subscription associated with the resource. diff --git a/docs/dyn/securitycenter_v1.projects.locations.muteConfigs.html b/docs/dyn/securitycenter_v1.projects.locations.muteConfigs.html index dac04dbb4b7..c8f2f30294a 100644 --- a/docs/dyn/securitycenter_v1.projects.locations.muteConfigs.html +++ b/docs/dyn/securitycenter_v1.projects.locations.muteConfigs.html @@ -77,21 +77,12 @@

Instance Methods

close()

Close httplib2 connections.

-

- create(parent, body=None, muteConfigId=None, x__xgafv=None)

-

Creates a mute config.

delete(name, x__xgafv=None)

Deletes an existing mute config.

get(name, x__xgafv=None)

Gets a mute config.

-

- list(parent, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists mute configs.

-

- list_next()

-

Retrieves the next page of results.

patch(name, body=None, updateMask=None, x__xgafv=None)

Updates a mute config.

@@ -101,49 +92,6 @@

Method Details

Close httplib2 connections.
-
- create(parent, body=None, muteConfigId=None, x__xgafv=None) -
Creates a mute config.
-
-Args:
-  parent: string, Required. Resource name of the new mute configs's parent. Its format is `organizations/[organization_id]`, `folders/[folder_id]`, or `projects/[project_id]`. (required)
-  body: object, The request body.
-    The object takes the form of:
-
-{ # A mute config is a Cloud SCC resource that contains the configuration to mute create/update events of findings.
-  "createTime": "A String", # Output only. The time at which the mute config was created. This field is set by the server and will be ignored if provided on config creation.
-  "description": "A String", # A description of the mute config.
-  "displayName": "A String", # The human readable name to be displayed for the mute config.
-  "expiryTime": "A String", # Optional. The expiry of the mute config. Only applicable for dynamic configs. If the expiry is set, when the config expires, it is removed from all findings.
-  "filter": "A String", # Required. An expression that defines the filter to apply across create/update events of findings. While creating a filter string, be mindful of the scope in which the mute configuration is being created. E.g., If a filter contains project = X but is created under the project = Y scope, it might not match any findings. The following field and operator combinations are supported: * severity: `=`, `:` * category: `=`, `:` * resource.name: `=`, `:` * resource.project_name: `=`, `:` * resource.project_display_name: `=`, `:` * resource.folders.resource_folder: `=`, `:` * resource.parent_name: `=`, `:` * resource.parent_display_name: `=`, `:` * resource.type: `=`, `:` * finding_class: `=`, `:` * indicator.ip_addresses: `=`, `:` * indicator.domains: `=`, `:`
-  "mostRecentEditor": "A String", # Output only. Email address of the user who last edited the mute config. This field is set by the server and will be ignored if provided on config creation or update.
-  "name": "A String", # This field will be ignored if provided on config creation. Format `organizations/{organization}/muteConfigs/{mute_config}` `folders/{folder}/muteConfigs/{mute_config}` `projects/{project}/muteConfigs/{mute_config}` `organizations/{organization}/locations/global/muteConfigs/{mute_config}` `folders/{folder}/locations/global/muteConfigs/{mute_config}` `projects/{project}/locations/global/muteConfigs/{mute_config}`
-  "type": "A String", # Optional. The type of the mute config, which determines what type of mute state the config affects. The static mute state takes precedence over the dynamic mute state. Immutable after creation. STATIC by default if not set during creation.
-  "updateTime": "A String", # Output only. The most recent time at which the mute config was updated. This field is set by the server and will be ignored if provided on config creation or update.
-}
-
-  muteConfigId: string, Required. Unique identifier provided by the client within the parent scope. It must consist of only lowercase letters, numbers, and hyphens, must start with a letter, must end with either a letter or a number, and must be 63 characters or less.
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # A mute config is a Cloud SCC resource that contains the configuration to mute create/update events of findings.
-  "createTime": "A String", # Output only. The time at which the mute config was created. This field is set by the server and will be ignored if provided on config creation.
-  "description": "A String", # A description of the mute config.
-  "displayName": "A String", # The human readable name to be displayed for the mute config.
-  "expiryTime": "A String", # Optional. The expiry of the mute config. Only applicable for dynamic configs. If the expiry is set, when the config expires, it is removed from all findings.
-  "filter": "A String", # Required. An expression that defines the filter to apply across create/update events of findings. While creating a filter string, be mindful of the scope in which the mute configuration is being created. E.g., If a filter contains project = X but is created under the project = Y scope, it might not match any findings. The following field and operator combinations are supported: * severity: `=`, `:` * category: `=`, `:` * resource.name: `=`, `:` * resource.project_name: `=`, `:` * resource.project_display_name: `=`, `:` * resource.folders.resource_folder: `=`, `:` * resource.parent_name: `=`, `:` * resource.parent_display_name: `=`, `:` * resource.type: `=`, `:` * finding_class: `=`, `:` * indicator.ip_addresses: `=`, `:` * indicator.domains: `=`, `:`
-  "mostRecentEditor": "A String", # Output only. Email address of the user who last edited the mute config. This field is set by the server and will be ignored if provided on config creation or update.
-  "name": "A String", # This field will be ignored if provided on config creation. Format `organizations/{organization}/muteConfigs/{mute_config}` `folders/{folder}/muteConfigs/{mute_config}` `projects/{project}/muteConfigs/{mute_config}` `organizations/{organization}/locations/global/muteConfigs/{mute_config}` `folders/{folder}/locations/global/muteConfigs/{mute_config}` `projects/{project}/locations/global/muteConfigs/{mute_config}`
-  "type": "A String", # Optional. The type of the mute config, which determines what type of mute state the config affects. The static mute state takes precedence over the dynamic mute state. Immutable after creation. STATIC by default if not set during creation.
-  "updateTime": "A String", # Output only. The most recent time at which the mute config was updated. This field is set by the server and will be ignored if provided on config creation or update.
-}
-
-
delete(name, x__xgafv=None)
Deletes an existing mute config.
@@ -189,54 +137,6 @@ 

Method Details

}
-
- list(parent, pageSize=None, pageToken=None, x__xgafv=None) -
Lists mute configs.
-
-Args:
-  parent: string, Required. The parent, which owns the collection of mute configs. Its format is `organizations/[organization_id]`, `folders/[folder_id]`, `projects/[project_id]`. (required)
-  pageSize: integer, The maximum number of configs to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
-  pageToken: string, A page token, received from a previous `ListMuteConfigs` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListMuteConfigs` must match the call that provided the page token.
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # Response message for listing mute configs.
-  "muteConfigs": [ # The mute configs from the specified parent.
-    { # A mute config is a Cloud SCC resource that contains the configuration to mute create/update events of findings.
-      "createTime": "A String", # Output only. The time at which the mute config was created. This field is set by the server and will be ignored if provided on config creation.
-      "description": "A String", # A description of the mute config.
-      "displayName": "A String", # The human readable name to be displayed for the mute config.
-      "expiryTime": "A String", # Optional. The expiry of the mute config. Only applicable for dynamic configs. If the expiry is set, when the config expires, it is removed from all findings.
-      "filter": "A String", # Required. An expression that defines the filter to apply across create/update events of findings. While creating a filter string, be mindful of the scope in which the mute configuration is being created. E.g., If a filter contains project = X but is created under the project = Y scope, it might not match any findings. The following field and operator combinations are supported: * severity: `=`, `:` * category: `=`, `:` * resource.name: `=`, `:` * resource.project_name: `=`, `:` * resource.project_display_name: `=`, `:` * resource.folders.resource_folder: `=`, `:` * resource.parent_name: `=`, `:` * resource.parent_display_name: `=`, `:` * resource.type: `=`, `:` * finding_class: `=`, `:` * indicator.ip_addresses: `=`, `:` * indicator.domains: `=`, `:`
-      "mostRecentEditor": "A String", # Output only. Email address of the user who last edited the mute config. This field is set by the server and will be ignored if provided on config creation or update.
-      "name": "A String", # This field will be ignored if provided on config creation. Format `organizations/{organization}/muteConfigs/{mute_config}` `folders/{folder}/muteConfigs/{mute_config}` `projects/{project}/muteConfigs/{mute_config}` `organizations/{organization}/locations/global/muteConfigs/{mute_config}` `folders/{folder}/locations/global/muteConfigs/{mute_config}` `projects/{project}/locations/global/muteConfigs/{mute_config}`
-      "type": "A String", # Optional. The type of the mute config, which determines what type of mute state the config affects. The static mute state takes precedence over the dynamic mute state. Immutable after creation. STATIC by default if not set during creation.
-      "updateTime": "A String", # Output only. The most recent time at which the mute config was updated. This field is set by the server and will be ignored if provided on config creation or update.
-    },
-  ],
-  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
-}
-
- -
- list_next() -
Retrieves the next page of results.
-
-        Args:
-          previous_request: The request for the previous page. (required)
-          previous_response: The response from the request for the previous page. (required)
-
-        Returns:
-          A request object that you can call 'execute()' on to request the next
-          page. Returns None if there are no more items in the collection.
-        
-
-
patch(name, body=None, updateMask=None, x__xgafv=None)
Updates a mute config.
diff --git a/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.customModules.html b/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.customModules.html
index 46e13e5007c..8868070d896 100644
--- a/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.customModules.html
+++ b/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.customModules.html
@@ -122,6 +122,9 @@ 

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -168,6 +171,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -239,6 +245,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -297,6 +306,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -357,6 +369,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -435,6 +450,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -482,6 +500,9 @@

Method Details

{ # Represents an instance of a Security Health Analytics custom module, including its full module name, display name, enablement state, and last updated time. You can create a custom module at the organization, folder, or project level. Custom modules that you create at the organization or folder level are inherited by the child folders and projects. "ancestorModule": "A String", # Output only. If empty, indicates that the custom module was created in the organization, folder, or project in which you are viewing the custom module. Otherwise, `ancestor_module` specifies the organization or folder from which the custom module is inherited. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # The user specified custom configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -529,6 +550,9 @@

Method Details

{ # Request message to simulate a CustomConfig against a given test resource. Maximum size of the request is 4 MB by default. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # Required. The custom configuration that you need to test. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. 
# Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. diff --git a/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.effectiveCustomModules.html b/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.effectiveCustomModules.html index 16661fed85a..1431ca61721 100644 --- a/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.effectiveCustomModules.html +++ b/docs/dyn/securitycenter_v1.projects.securityHealthAnalyticsSettings.effectiveCustomModules.html @@ -108,6 +108,9 @@

Method Details

{ # An EffectiveSecurityHealthAnalyticsCustomModule is the representation of a Security Health Analytics custom module at a specified level of the resource hierarchy: organization, folder, or project. If a custom module is inherited from a parent organization or folder, the value of the `enablementState` property in EffectiveSecurityHealthAnalyticsCustomModule is set to the value that is effective in the parent, instead of `INHERITED`. For example, if the module is enabled in a parent organization or folder, the effective enablement_state for the module in all child folders or projects is also `enabled`. EffectiveSecurityHealthAnalyticsCustomModule is read-only. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # Output only. The user-specified configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. @@ -162,6 +165,9 @@

Method Details

"effectiveSecurityHealthAnalyticsCustomModules": [ # Effective custom modules belonging to the requested parent. { # An EffectiveSecurityHealthAnalyticsCustomModule is the representation of a Security Health Analytics custom module at a specified level of the resource hierarchy: organization, folder, or project. If a custom module is inherited from a parent organization or folder, the value of the `enablementState` property in EffectiveSecurityHealthAnalyticsCustomModule is set to the value that is effective in the parent, instead of `INHERITED`. For example, if the module is enabled in a parent organization or folder, the effective enablement_state for the module in all child folders or projects is also `enabled`. EffectiveSecurityHealthAnalyticsCustomModule is read-only. "customConfig": { # Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify. # Output only. The user-specified configuration for the module. + "celPolicy": { # YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: "compute.googleapis.com/Instance" - resource_types: "compute.googleapis.com/Firewall" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies. # The CEL policy spec attached to the custom module. + "spec": "A String", # The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false. + }, "customOutput": { # A set of optional name-value pairs that define custom source properties to return with each finding that is generated by the custom module. The custom source properties that are defined here are included in the finding JSON under `sourceProperties`. # Custom output properties. "properties": [ # A list of custom output properties to add to the finding. { # An individual name-value pair that defines a custom source property. diff --git a/docs/dyn/securitycenter_v1.projects.sources.findings.html b/docs/dyn/securitycenter_v1.projects.sources.findings.html index d9360ae4745..99902ea6444 100644 --- a/docs/dyn/securitycenter_v1.projects.sources.findings.html +++ b/docs/dyn/securitycenter_v1.projects.sources.findings.html @@ -782,6 +782,7 @@

Method Details

}, ], "resourceGroup": { # Represents an Azure resource group. # The Azure resource group associated with the resource. + "id": "A String", # The ID of the Azure resource group. "name": "A String", # The name of the Azure resource group. This is not a UUID. }, "subscription": { # Represents an Azure subscription. # The Azure subscription associated with the resource. diff --git a/docs/dyn/servicemanagement_v1.services.configs.html b/docs/dyn/servicemanagement_v1.services.configs.html index 506f826e635..b26d08f6718 100644 --- a/docs/dyn/servicemanagement_v1.services.configs.html +++ b/docs/dyn/servicemanagement_v1.services.configs.html @@ -129,7 +129,7 @@

Method Details

}, ], "mixins": [ # Included interfaces. See Mixin. - { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. 
Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } + { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. 
rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } "name": "A String", # The fully qualified name of the interface which is included. "root": "A String", # If non-empty specifies a path under which inherited HTTP paths are rooted. }, @@ -790,7 +790,7 @@

Method Details

}, ], "mixins": [ # Included interfaces. See Mixin. - { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. 
Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } + { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. 
rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } "name": "A String", # The fully qualified name of the interface which is included. "root": "A String", # If non-empty specifies a path under which inherited HTTP paths are rooted. }, @@ -1463,7 +1463,7 @@

Method Details

}, ], "mixins": [ # Included interfaces. See Mixin. - { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. 
Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } + { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. 
rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } "name": "A String", # The fully qualified name of the interface which is included. "root": "A String", # If non-empty specifies a path under which inherited HTTP paths are rooted. }, @@ -2136,7 +2136,7 @@

Method Details

}, ], "mixins": [ # Included interfaces. See Mixin. - { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. 
Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } + { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. 
rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } "name": "A String", # The fully qualified name of the interface which is included. "root": "A String", # If non-empty specifies a path under which inherited HTTP paths are rooted. }, diff --git a/docs/dyn/servicemanagement_v1.services.html b/docs/dyn/servicemanagement_v1.services.html index 934f63571b5..fce94b9018d 100644 --- a/docs/dyn/servicemanagement_v1.services.html +++ b/docs/dyn/servicemanagement_v1.services.html @@ -324,7 +324,7 @@

Method Details

}, ], "mixins": [ # Included interfaces. See Mixin. - { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. 
Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } + { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. 
rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } "name": "A String", # The fully qualified name of the interface which is included. "root": "A String", # If non-empty specifies a path under which inherited HTTP paths are rooted. }, diff --git a/docs/dyn/serviceusage_v1.services.html b/docs/dyn/serviceusage_v1.services.html index 800009e2442..01dd85fd8f3 100644 --- a/docs/dyn/serviceusage_v1.services.html +++ b/docs/dyn/serviceusage_v1.services.html @@ -183,7 +183,7 @@

Method Details

}, ], "mixins": [ # Included interfaces. See Mixin. - { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. 
Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } + { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. 
rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } "name": "A String", # The fully qualified name of the interface which is included. "root": "A String", # If non-empty specifies a path under which inherited HTTP paths are rooted. }, @@ -490,7 +490,7 @@

Method Details

}, ], "mixins": [ # Included interfaces. See Mixin. - { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. 
Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } + { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. 
rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } "name": "A String", # The fully qualified name of the interface which is included. "root": "A String", # If non-empty specifies a path under which inherited HTTP paths are rooted. }, @@ -712,7 +712,7 @@

Method Details

}, ], "mixins": [ # Included interfaces. See Mixin. - { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. 
Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } + { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. 
rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } "name": "A String", # The fully qualified name of the interface which is included. "root": "A String", # If non-empty specifies a path under which inherited HTTP paths are rooted. }, diff --git a/docs/dyn/serviceusage_v1beta1.services.html b/docs/dyn/serviceusage_v1beta1.services.html index a1e199964e9..6677e896a02 100644 --- a/docs/dyn/serviceusage_v1beta1.services.html +++ b/docs/dyn/serviceusage_v1beta1.services.html @@ -307,7 +307,7 @@

Method Details

}, ], "mixins": [ # Included interfaces. See Mixin. - { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. 
Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } + { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. 
rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } "name": "A String", # The fully qualified name of the interface which is included. "root": "A String", # If non-empty specifies a path under which inherited HTTP paths are rooted. }, @@ -529,7 +529,7 @@

Method Details

}, ], "mixins": [ # Included interfaces. See Mixin. - { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. 
Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } + { # Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = "/v2/{resource=**}"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. 
rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; } ... } "name": "A String", # The fully qualified name of the interface which is included. "root": "A String", # If non-empty specifies a path under which inherited HTTP paths are rooted. }, diff --git a/docs/dyn/storagetransfer_v1.transferJobs.html b/docs/dyn/storagetransfer_v1.transferJobs.html index 8a41db247dc..528f2913411 100644 --- a/docs/dyn/storagetransfer_v1.transferJobs.html +++ b/docs/dyn/storagetransfer_v1.transferJobs.html @@ -141,18 +141,18 @@

Method Details

"pubsubTopic": "A String", # Required. The `Topic.name` of the Pub/Sub topic to which to publish notifications. Must be of the format: `projects/{project}/topics/{topic}`. Not matching this format results in an INVALID_ARGUMENT error. }, "projectId": "A String", # The ID of the Google Cloud project that owns the job. - "replicationSpec": { # Specifies the configuration for running a replication job. # Replication specification. - "gcsDataSink": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # Specifies cloud Storage data sink. + "replicationSpec": { # Specifies the configuration for a cross-bucket replication job. Cross-bucket replication copies new or updated objects from a source Cloud Storage bucket to a destination Cloud Storage bucket. Existing objects in the source bucket are not copied by a new cross-bucket replication job. # Replication specification. + "gcsDataSink": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # The Cloud Storage bucket to which to replicate objects. "bucketName": "A String", # Required. Cloud Storage bucket name. Must meet [Bucket Name Requirements](/storage/docs/naming#requirements). "managedFolderTransferEnabled": True or False, # Preview. Enables the transfer of managed folders between Cloud Storage buckets. Set this option on the gcs_data_source. If set to true: - Managed folders in the source bucket are transferred to the destination bucket. - Managed folders in the destination bucket are overwritten. Other OVERWRITE options are not supported. 
See [Transfer Cloud Storage managed folders](/storage-transfer/docs/managed-folders). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. The root path value must meet [Object Name Requirements](/storage/docs/naming#objectnames). }, - "gcsDataSource": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # Specifies cloud Storage data source. + "gcsDataSource": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # The Cloud Storage bucket from which to replicate objects. "bucketName": "A String", # Required. Cloud Storage bucket name. Must meet [Bucket Name Requirements](/storage/docs/naming#requirements). "managedFolderTransferEnabled": True or False, # Preview. Enables the transfer of managed folders between Cloud Storage buckets. Set this option on the gcs_data_source. If set to true: - Managed folders in the source bucket are transferred to the destination bucket. - Managed folders in the destination bucket are overwritten. Other OVERWRITE options are not supported. See [Transfer Cloud Storage managed folders](/storage-transfer/docs/managed-folders). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. The root path value must meet [Object Name Requirements](/storage/docs/naming#objectnames). 
}, - "objectConditions": { # Conditions that determine which objects are transferred. Applies only to Cloud Data Sources such as S3, Azure, and Cloud Storage. The "last modification time" refers to the time of the last change to the object's content or metadata — specifically, this is the `updated` property of Cloud Storage objects, the `LastModified` field of S3 objects, and the `Last-Modified` header of Azure blobs. Transfers with a PosixFilesystem source or destination don't support `ObjectConditions`. # Specifies the object conditions to only include objects that satisfy these conditions in the set of data source objects. Object conditions based on objects' "last modification time" do not exclude objects in a data sink. + "objectConditions": { # Conditions that determine which objects are transferred. Applies only to Cloud Data Sources such as S3, Azure, and Cloud Storage. The "last modification time" refers to the time of the last change to the object's content or metadata — specifically, this is the `updated` property of Cloud Storage objects, the `LastModified` field of S3 objects, and the `Last-Modified` header of Azure blobs. Transfers with a PosixFilesystem source or destination don't support `ObjectConditions`. # Object conditions that determine which objects are transferred. For replication jobs, only `include_prefixes` and `exclude_prefixes` are supported. "excludePrefixes": [ # If you specify `exclude_prefixes`, Storage Transfer Service uses the items in the `exclude_prefixes` array to determine which objects to exclude from a transfer. Objects must not start with one of the matching `exclude_prefixes` for inclusion in a transfer. The following are requirements of `exclude_prefixes`: * Each exclude-prefix can contain any sequence of Unicode characters, to a max length of 1024 bytes when UTF8-encoded, and must not contain Carriage Return or Line Feed characters. Wildcard matching and regular expression matching are not supported. 
* Each exclude-prefix must omit the leading slash. For example, to exclude the object `s3://my-aws-bucket/logs/y=2015/requests.gz`, specify the exclude-prefix as `logs/y=2015/requests.gz`. * None of the exclude-prefix values can be empty, if specified. * Each exclude-prefix must exclude a distinct portion of the object namespace. No exclude-prefix may be a prefix of another exclude-prefix. * If include_prefixes is specified, then each exclude-prefix must start with the value of a path explicitly included by `include_prefixes`. The max size of `exclude_prefixes` is 1000. For more information, see [Filtering objects from transfers](/storage-transfer/docs/filtering-objects-from-transfers). "A String", ], @@ -164,7 +164,7 @@

Method Details

"maxTimeElapsedSinceLastModification": "A String", # Ensures that objects are not transferred if a specific maximum time has elapsed since the "last modification time". When a TransferOperation begins, objects with a "last modification time" are transferred only if the elapsed time between the start_time of the `TransferOperation`and the "last modification time" of the object is less than the value of max_time_elapsed_since_last_modification`. Objects that do not have a "last modification time" are also transferred. "minTimeElapsedSinceLastModification": "A String", # Ensures that objects are not transferred until a specific minimum time has elapsed after the "last modification time". When a TransferOperation begins, objects with a "last modification time" are transferred only if the elapsed time between the start_time of the `TransferOperation` and the "last modification time" of the object is equal to or greater than the value of min_time_elapsed_since_last_modification`. Objects that do not have a "last modification time" are also transferred. }, - "transferOptions": { # TransferOptions define the actions to be performed on objects in a transfer. # Specifies the actions to be performed on the object during replication. Delete options are not supported for replication and when specified, the request fails with an INVALID_ARGUMENT error. + "transferOptions": { # TransferOptions define the actions to be performed on objects in a transfer. # Specifies the metadata options to be applied during replication. Delete options are not supported. If a delete option is specified, the request fails with an INVALID_ARGUMENT error. "deleteObjectsFromSourceAfterTransfer": True or False, # Whether objects should be deleted from the source after they are transferred to the sink. **Note:** This option and delete_objects_unique_in_sink are mutually exclusive. "deleteObjectsUniqueInSink": True or False, # Whether objects that exist only in the sink should be deleted. 
**Note:** This option and delete_objects_from_source_after_transfer are mutually exclusive. "metadataOptions": { # Specifies the metadata options for running a transfer. # Represents the selected metadata options for a transfer job. @@ -343,18 +343,18 @@

Method Details

"pubsubTopic": "A String", # Required. The `Topic.name` of the Pub/Sub topic to which to publish notifications. Must be of the format: `projects/{project}/topics/{topic}`. Not matching this format results in an INVALID_ARGUMENT error. }, "projectId": "A String", # The ID of the Google Cloud project that owns the job. - "replicationSpec": { # Specifies the configuration for running a replication job. # Replication specification. - "gcsDataSink": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # Specifies cloud Storage data sink. + "replicationSpec": { # Specifies the configuration for a cross-bucket replication job. Cross-bucket replication copies new or updated objects from a source Cloud Storage bucket to a destination Cloud Storage bucket. Existing objects in the source bucket are not copied by a new cross-bucket replication job. # Replication specification. + "gcsDataSink": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # The Cloud Storage bucket to which to replicate objects. "bucketName": "A String", # Required. Cloud Storage bucket name. Must meet [Bucket Name Requirements](/storage/docs/naming#requirements). "managedFolderTransferEnabled": True or False, # Preview. Enables the transfer of managed folders between Cloud Storage buckets. Set this option on the gcs_data_source. If set to true: - Managed folders in the source bucket are transferred to the destination bucket. - Managed folders in the destination bucket are overwritten. Other OVERWRITE options are not supported. 
See [Transfer Cloud Storage managed folders](/storage-transfer/docs/managed-folders). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. The root path value must meet [Object Name Requirements](/storage/docs/naming#objectnames). }, - "gcsDataSource": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # Specifies cloud Storage data source. + "gcsDataSource": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # The Cloud Storage bucket from which to replicate objects. "bucketName": "A String", # Required. Cloud Storage bucket name. Must meet [Bucket Name Requirements](/storage/docs/naming#requirements). "managedFolderTransferEnabled": True or False, # Preview. Enables the transfer of managed folders between Cloud Storage buckets. Set this option on the gcs_data_source. If set to true: - Managed folders in the source bucket are transferred to the destination bucket. - Managed folders in the destination bucket are overwritten. Other OVERWRITE options are not supported. See [Transfer Cloud Storage managed folders](/storage-transfer/docs/managed-folders). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. The root path value must meet [Object Name Requirements](/storage/docs/naming#objectnames). 
}, - "objectConditions": { # Conditions that determine which objects are transferred. Applies only to Cloud Data Sources such as S3, Azure, and Cloud Storage. The "last modification time" refers to the time of the last change to the object's content or metadata — specifically, this is the `updated` property of Cloud Storage objects, the `LastModified` field of S3 objects, and the `Last-Modified` header of Azure blobs. Transfers with a PosixFilesystem source or destination don't support `ObjectConditions`. # Specifies the object conditions to only include objects that satisfy these conditions in the set of data source objects. Object conditions based on objects' "last modification time" do not exclude objects in a data sink. + "objectConditions": { # Conditions that determine which objects are transferred. Applies only to Cloud Data Sources such as S3, Azure, and Cloud Storage. The "last modification time" refers to the time of the last change to the object's content or metadata — specifically, this is the `updated` property of Cloud Storage objects, the `LastModified` field of S3 objects, and the `Last-Modified` header of Azure blobs. Transfers with a PosixFilesystem source or destination don't support `ObjectConditions`. # Object conditions that determine which objects are transferred. For replication jobs, only `include_prefixes` and `exclude_prefixes` are supported. "excludePrefixes": [ # If you specify `exclude_prefixes`, Storage Transfer Service uses the items in the `exclude_prefixes` array to determine which objects to exclude from a transfer. Objects must not start with one of the matching `exclude_prefixes` for inclusion in a transfer. The following are requirements of `exclude_prefixes`: * Each exclude-prefix can contain any sequence of Unicode characters, to a max length of 1024 bytes when UTF8-encoded, and must not contain Carriage Return or Line Feed characters. Wildcard matching and regular expression matching are not supported. 
* Each exclude-prefix must omit the leading slash. For example, to exclude the object `s3://my-aws-bucket/logs/y=2015/requests.gz`, specify the exclude-prefix as `logs/y=2015/requests.gz`. * None of the exclude-prefix values can be empty, if specified. * Each exclude-prefix must exclude a distinct portion of the object namespace. No exclude-prefix may be a prefix of another exclude-prefix. * If include_prefixes is specified, then each exclude-prefix must start with the value of a path explicitly included by `include_prefixes`. The max size of `exclude_prefixes` is 1000. For more information, see [Filtering objects from transfers](/storage-transfer/docs/filtering-objects-from-transfers). "A String", ], @@ -366,7 +366,7 @@

Method Details

"maxTimeElapsedSinceLastModification": "A String", # Ensures that objects are not transferred if a specific maximum time has elapsed since the "last modification time". When a TransferOperation begins, objects with a "last modification time" are transferred only if the elapsed time between the start_time of the `TransferOperation`and the "last modification time" of the object is less than the value of max_time_elapsed_since_last_modification`. Objects that do not have a "last modification time" are also transferred. "minTimeElapsedSinceLastModification": "A String", # Ensures that objects are not transferred until a specific minimum time has elapsed after the "last modification time". When a TransferOperation begins, objects with a "last modification time" are transferred only if the elapsed time between the start_time of the `TransferOperation` and the "last modification time" of the object is equal to or greater than the value of min_time_elapsed_since_last_modification`. Objects that do not have a "last modification time" are also transferred. }, - "transferOptions": { # TransferOptions define the actions to be performed on objects in a transfer. # Specifies the actions to be performed on the object during replication. Delete options are not supported for replication and when specified, the request fails with an INVALID_ARGUMENT error. + "transferOptions": { # TransferOptions define the actions to be performed on objects in a transfer. # Specifies the metadata options to be applied during replication. Delete options are not supported. If a delete option is specified, the request fails with an INVALID_ARGUMENT error. "deleteObjectsFromSourceAfterTransfer": True or False, # Whether objects should be deleted from the source after they are transferred to the sink. **Note:** This option and delete_objects_unique_in_sink are mutually exclusive. "deleteObjectsUniqueInSink": True or False, # Whether objects that exist only in the sink should be deleted. 
**Note:** This option and delete_objects_from_source_after_transfer are mutually exclusive. "metadataOptions": { # Specifies the metadata options for running a transfer. # Represents the selected metadata options for a transfer job. @@ -572,18 +572,18 @@

Method Details

"pubsubTopic": "A String", # Required. The `Topic.name` of the Pub/Sub topic to which to publish notifications. Must be of the format: `projects/{project}/topics/{topic}`. Not matching this format results in an INVALID_ARGUMENT error. }, "projectId": "A String", # The ID of the Google Cloud project that owns the job. - "replicationSpec": { # Specifies the configuration for running a replication job. # Replication specification. - "gcsDataSink": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # Specifies cloud Storage data sink. + "replicationSpec": { # Specifies the configuration for a cross-bucket replication job. Cross-bucket replication copies new or updated objects from a source Cloud Storage bucket to a destination Cloud Storage bucket. Existing objects in the source bucket are not copied by a new cross-bucket replication job. # Replication specification. + "gcsDataSink": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # The Cloud Storage bucket to which to replicate objects. "bucketName": "A String", # Required. Cloud Storage bucket name. Must meet [Bucket Name Requirements](/storage/docs/naming#requirements). "managedFolderTransferEnabled": True or False, # Preview. Enables the transfer of managed folders between Cloud Storage buckets. Set this option on the gcs_data_source. If set to true: - Managed folders in the source bucket are transferred to the destination bucket. - Managed folders in the destination bucket are overwritten. Other OVERWRITE options are not supported. 
See [Transfer Cloud Storage managed folders](/storage-transfer/docs/managed-folders). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. The root path value must meet [Object Name Requirements](/storage/docs/naming#objectnames). }, - "gcsDataSource": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # Specifies cloud Storage data source. + "gcsDataSource": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # The Cloud Storage bucket from which to replicate objects. "bucketName": "A String", # Required. Cloud Storage bucket name. Must meet [Bucket Name Requirements](/storage/docs/naming#requirements). "managedFolderTransferEnabled": True or False, # Preview. Enables the transfer of managed folders between Cloud Storage buckets. Set this option on the gcs_data_source. If set to true: - Managed folders in the source bucket are transferred to the destination bucket. - Managed folders in the destination bucket are overwritten. Other OVERWRITE options are not supported. See [Transfer Cloud Storage managed folders](/storage-transfer/docs/managed-folders). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. The root path value must meet [Object Name Requirements](/storage/docs/naming#objectnames). 
}, - "objectConditions": { # Conditions that determine which objects are transferred. Applies only to Cloud Data Sources such as S3, Azure, and Cloud Storage. The "last modification time" refers to the time of the last change to the object's content or metadata — specifically, this is the `updated` property of Cloud Storage objects, the `LastModified` field of S3 objects, and the `Last-Modified` header of Azure blobs. Transfers with a PosixFilesystem source or destination don't support `ObjectConditions`. # Specifies the object conditions to only include objects that satisfy these conditions in the set of data source objects. Object conditions based on objects' "last modification time" do not exclude objects in a data sink. + "objectConditions": { # Conditions that determine which objects are transferred. Applies only to Cloud Data Sources such as S3, Azure, and Cloud Storage. The "last modification time" refers to the time of the last change to the object's content or metadata — specifically, this is the `updated` property of Cloud Storage objects, the `LastModified` field of S3 objects, and the `Last-Modified` header of Azure blobs. Transfers with a PosixFilesystem source or destination don't support `ObjectConditions`. # Object conditions that determine which objects are transferred. For replication jobs, only `include_prefixes` and `exclude_prefixes` are supported. "excludePrefixes": [ # If you specify `exclude_prefixes`, Storage Transfer Service uses the items in the `exclude_prefixes` array to determine which objects to exclude from a transfer. Objects must not start with one of the matching `exclude_prefixes` for inclusion in a transfer. The following are requirements of `exclude_prefixes`: * Each exclude-prefix can contain any sequence of Unicode characters, to a max length of 1024 bytes when UTF8-encoded, and must not contain Carriage Return or Line Feed characters. Wildcard matching and regular expression matching are not supported. 
* Each exclude-prefix must omit the leading slash. For example, to exclude the object `s3://my-aws-bucket/logs/y=2015/requests.gz`, specify the exclude-prefix as `logs/y=2015/requests.gz`. * None of the exclude-prefix values can be empty, if specified. * Each exclude-prefix must exclude a distinct portion of the object namespace. No exclude-prefix may be a prefix of another exclude-prefix. * If include_prefixes is specified, then each exclude-prefix must start with the value of a path explicitly included by `include_prefixes`. The max size of `exclude_prefixes` is 1000. For more information, see [Filtering objects from transfers](/storage-transfer/docs/filtering-objects-from-transfers). "A String", ], @@ -595,7 +595,7 @@

Method Details

"maxTimeElapsedSinceLastModification": "A String", # Ensures that objects are not transferred if a specific maximum time has elapsed since the "last modification time". When a TransferOperation begins, objects with a "last modification time" are transferred only if the elapsed time between the start_time of the `TransferOperation`and the "last modification time" of the object is less than the value of max_time_elapsed_since_last_modification`. Objects that do not have a "last modification time" are also transferred. "minTimeElapsedSinceLastModification": "A String", # Ensures that objects are not transferred until a specific minimum time has elapsed after the "last modification time". When a TransferOperation begins, objects with a "last modification time" are transferred only if the elapsed time between the start_time of the `TransferOperation` and the "last modification time" of the object is equal to or greater than the value of min_time_elapsed_since_last_modification`. Objects that do not have a "last modification time" are also transferred. }, - "transferOptions": { # TransferOptions define the actions to be performed on objects in a transfer. # Specifies the actions to be performed on the object during replication. Delete options are not supported for replication and when specified, the request fails with an INVALID_ARGUMENT error. + "transferOptions": { # TransferOptions define the actions to be performed on objects in a transfer. # Specifies the metadata options to be applied during replication. Delete options are not supported. If a delete option is specified, the request fails with an INVALID_ARGUMENT error. "deleteObjectsFromSourceAfterTransfer": True or False, # Whether objects should be deleted from the source after they are transferred to the sink. **Note:** This option and delete_objects_unique_in_sink are mutually exclusive. "deleteObjectsUniqueInSink": True or False, # Whether objects that exist only in the sink should be deleted. 
**Note:** This option and delete_objects_from_source_after_transfer are mutually exclusive. "metadataOptions": { # Specifies the metadata options for running a transfer. # Represents the selected metadata options for a transfer job. @@ -743,7 +743,7 @@

Method Details

Lists transfer jobs.
 
 Args:
-  filter: string, Required. A list of query parameters specified as JSON text in the form of: `{"projectId":"my_project_id", "jobNames":["jobid1","jobid2",...], "jobStatuses":["status1","status2",...]}` Since `jobNames` and `jobStatuses` support multiple values, their values must be specified with array notation. `projectId` is required. `jobNames` and `jobStatuses` are optional. The valid values for `jobStatuses` are case-insensitive: ENABLED, DISABLED, and DELETED. (required)
+  filter: string, Required. A list of query parameters specified as JSON text in the form of: ``` { "projectId":"my_project_id", "jobNames":["jobid1","jobid2",...], "jobStatuses":["status1","status2",...], "dataBackend":"QUERY_REPLICATION_CONFIGS", "sourceBucket":"source-bucket-name", "sinkBucket":"sink-bucket-name", } ``` The JSON formatting in the example is for display only; provide the query parameters without spaces or line breaks. * `projectId` is required. * Since `jobNames` and `jobStatuses` support multiple values, their values must be specified with array notation. `jobNames` and `jobStatuses` are optional. Valid values are case-insensitive: * ENABLED * DISABLED * DELETED * Specify `"dataBackend":"QUERY_REPLICATION_CONFIGS"` to return a list of cross-bucket replication jobs. * Limit the results to jobs from a particular bucket with `sourceBucket` and/or to a particular bucket with `sinkBucket`. (required)
   pageSize: integer, The list page size. The max allowed value is 256.
   pageToken: string, The list page token.
   x__xgafv: string, V1 error format.
@@ -786,18 +786,18 @@ 

Method Details

"pubsubTopic": "A String", # Required. The `Topic.name` of the Pub/Sub topic to which to publish notifications. Must be of the format: `projects/{project}/topics/{topic}`. Not matching this format results in an INVALID_ARGUMENT error. }, "projectId": "A String", # The ID of the Google Cloud project that owns the job. - "replicationSpec": { # Specifies the configuration for running a replication job. # Replication specification. - "gcsDataSink": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # Specifies cloud Storage data sink. + "replicationSpec": { # Specifies the configuration for a cross-bucket replication job. Cross-bucket replication copies new or updated objects from a source Cloud Storage bucket to a destination Cloud Storage bucket. Existing objects in the source bucket are not copied by a new cross-bucket replication job. # Replication specification. + "gcsDataSink": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # The Cloud Storage bucket to which to replicate objects. "bucketName": "A String", # Required. Cloud Storage bucket name. Must meet [Bucket Name Requirements](/storage/docs/naming#requirements). "managedFolderTransferEnabled": True or False, # Preview. Enables the transfer of managed folders between Cloud Storage buckets. Set this option on the gcs_data_source. If set to true: - Managed folders in the source bucket are transferred to the destination bucket. - Managed folders in the destination bucket are overwritten. Other OVERWRITE options are not supported. 
See [Transfer Cloud Storage managed folders](/storage-transfer/docs/managed-folders). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. The root path value must meet [Object Name Requirements](/storage/docs/naming#objectnames). }, - "gcsDataSource": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # Specifies cloud Storage data source. + "gcsDataSource": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # The Cloud Storage bucket from which to replicate objects. "bucketName": "A String", # Required. Cloud Storage bucket name. Must meet [Bucket Name Requirements](/storage/docs/naming#requirements). "managedFolderTransferEnabled": True or False, # Preview. Enables the transfer of managed folders between Cloud Storage buckets. Set this option on the gcs_data_source. If set to true: - Managed folders in the source bucket are transferred to the destination bucket. - Managed folders in the destination bucket are overwritten. Other OVERWRITE options are not supported. See [Transfer Cloud Storage managed folders](/storage-transfer/docs/managed-folders). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. The root path value must meet [Object Name Requirements](/storage/docs/naming#objectnames). 
}, - "objectConditions": { # Conditions that determine which objects are transferred. Applies only to Cloud Data Sources such as S3, Azure, and Cloud Storage. The "last modification time" refers to the time of the last change to the object's content or metadata — specifically, this is the `updated` property of Cloud Storage objects, the `LastModified` field of S3 objects, and the `Last-Modified` header of Azure blobs. Transfers with a PosixFilesystem source or destination don't support `ObjectConditions`. # Specifies the object conditions to only include objects that satisfy these conditions in the set of data source objects. Object conditions based on objects' "last modification time" do not exclude objects in a data sink. + "objectConditions": { # Conditions that determine which objects are transferred. Applies only to Cloud Data Sources such as S3, Azure, and Cloud Storage. The "last modification time" refers to the time of the last change to the object's content or metadata — specifically, this is the `updated` property of Cloud Storage objects, the `LastModified` field of S3 objects, and the `Last-Modified` header of Azure blobs. Transfers with a PosixFilesystem source or destination don't support `ObjectConditions`. # Object conditions that determine which objects are transferred. For replication jobs, only `include_prefixes` and `exclude_prefixes` are supported. "excludePrefixes": [ # If you specify `exclude_prefixes`, Storage Transfer Service uses the items in the `exclude_prefixes` array to determine which objects to exclude from a transfer. Objects must not start with one of the matching `exclude_prefixes` for inclusion in a transfer. The following are requirements of `exclude_prefixes`: * Each exclude-prefix can contain any sequence of Unicode characters, to a max length of 1024 bytes when UTF8-encoded, and must not contain Carriage Return or Line Feed characters. Wildcard matching and regular expression matching are not supported. 
* Each exclude-prefix must omit the leading slash. For example, to exclude the object `s3://my-aws-bucket/logs/y=2015/requests.gz`, specify the exclude-prefix as `logs/y=2015/requests.gz`. * None of the exclude-prefix values can be empty, if specified. * Each exclude-prefix must exclude a distinct portion of the object namespace. No exclude-prefix may be a prefix of another exclude-prefix. * If include_prefixes is specified, then each exclude-prefix must start with the value of a path explicitly included by `include_prefixes`. The max size of `exclude_prefixes` is 1000. For more information, see [Filtering objects from transfers](/storage-transfer/docs/filtering-objects-from-transfers). "A String", ], @@ -809,7 +809,7 @@

Method Details

"maxTimeElapsedSinceLastModification": "A String", # Ensures that objects are not transferred if a specific maximum time has elapsed since the "last modification time". When a TransferOperation begins, objects with a "last modification time" are transferred only if the elapsed time between the start_time of the `TransferOperation`and the "last modification time" of the object is less than the value of max_time_elapsed_since_last_modification`. Objects that do not have a "last modification time" are also transferred. "minTimeElapsedSinceLastModification": "A String", # Ensures that objects are not transferred until a specific minimum time has elapsed after the "last modification time". When a TransferOperation begins, objects with a "last modification time" are transferred only if the elapsed time between the start_time of the `TransferOperation` and the "last modification time" of the object is equal to or greater than the value of min_time_elapsed_since_last_modification`. Objects that do not have a "last modification time" are also transferred. }, - "transferOptions": { # TransferOptions define the actions to be performed on objects in a transfer. # Specifies the actions to be performed on the object during replication. Delete options are not supported for replication and when specified, the request fails with an INVALID_ARGUMENT error. + "transferOptions": { # TransferOptions define the actions to be performed on objects in a transfer. # Specifies the metadata options to be applied during replication. Delete options are not supported. If a delete option is specified, the request fails with an INVALID_ARGUMENT error. "deleteObjectsFromSourceAfterTransfer": True or False, # Whether objects should be deleted from the source after they are transferred to the sink. **Note:** This option and delete_objects_unique_in_sink are mutually exclusive. "deleteObjectsUniqueInSink": True or False, # Whether objects that exist only in the sink should be deleted. 
**Note:** This option and delete_objects_from_source_after_transfer are mutually exclusive. "metadataOptions": { # Specifies the metadata options for running a transfer. # Represents the selected metadata options for a transfer job. @@ -1008,18 +1008,18 @@

Method Details

"pubsubTopic": "A String", # Required. The `Topic.name` of the Pub/Sub topic to which to publish notifications. Must be of the format: `projects/{project}/topics/{topic}`. Not matching this format results in an INVALID_ARGUMENT error. }, "projectId": "A String", # The ID of the Google Cloud project that owns the job. - "replicationSpec": { # Specifies the configuration for running a replication job. # Replication specification. - "gcsDataSink": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # Specifies cloud Storage data sink. + "replicationSpec": { # Specifies the configuration for a cross-bucket replication job. Cross-bucket replication copies new or updated objects from a source Cloud Storage bucket to a destination Cloud Storage bucket. Existing objects in the source bucket are not copied by a new cross-bucket replication job. # Replication specification. + "gcsDataSink": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # The Cloud Storage bucket to which to replicate objects. "bucketName": "A String", # Required. Cloud Storage bucket name. Must meet [Bucket Name Requirements](/storage/docs/naming#requirements). "managedFolderTransferEnabled": True or False, # Preview. Enables the transfer of managed folders between Cloud Storage buckets. Set this option on the gcs_data_source. If set to true: - Managed folders in the source bucket are transferred to the destination bucket. - Managed folders in the destination bucket are overwritten. Other OVERWRITE options are not supported. 
See [Transfer Cloud Storage managed folders](/storage-transfer/docs/managed-folders). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. The root path value must meet [Object Name Requirements](/storage/docs/naming#objectnames). }, - "gcsDataSource": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # Specifies cloud Storage data source. + "gcsDataSource": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # The Cloud Storage bucket from which to replicate objects. "bucketName": "A String", # Required. Cloud Storage bucket name. Must meet [Bucket Name Requirements](/storage/docs/naming#requirements). "managedFolderTransferEnabled": True or False, # Preview. Enables the transfer of managed folders between Cloud Storage buckets. Set this option on the gcs_data_source. If set to true: - Managed folders in the source bucket are transferred to the destination bucket. - Managed folders in the destination bucket are overwritten. Other OVERWRITE options are not supported. See [Transfer Cloud Storage managed folders](/storage-transfer/docs/managed-folders). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. The root path value must meet [Object Name Requirements](/storage/docs/naming#objectnames). 
}, - "objectConditions": { # Conditions that determine which objects are transferred. Applies only to Cloud Data Sources such as S3, Azure, and Cloud Storage. The "last modification time" refers to the time of the last change to the object's content or metadata — specifically, this is the `updated` property of Cloud Storage objects, the `LastModified` field of S3 objects, and the `Last-Modified` header of Azure blobs. Transfers with a PosixFilesystem source or destination don't support `ObjectConditions`. # Specifies the object conditions to only include objects that satisfy these conditions in the set of data source objects. Object conditions based on objects' "last modification time" do not exclude objects in a data sink. + "objectConditions": { # Conditions that determine which objects are transferred. Applies only to Cloud Data Sources such as S3, Azure, and Cloud Storage. The "last modification time" refers to the time of the last change to the object's content or metadata — specifically, this is the `updated` property of Cloud Storage objects, the `LastModified` field of S3 objects, and the `Last-Modified` header of Azure blobs. Transfers with a PosixFilesystem source or destination don't support `ObjectConditions`. # Object conditions that determine which objects are transferred. For replication jobs, only `include_prefixes` and `exclude_prefixes` are supported. "excludePrefixes": [ # If you specify `exclude_prefixes`, Storage Transfer Service uses the items in the `exclude_prefixes` array to determine which objects to exclude from a transfer. Objects must not start with one of the matching `exclude_prefixes` for inclusion in a transfer. The following are requirements of `exclude_prefixes`: * Each exclude-prefix can contain any sequence of Unicode characters, to a max length of 1024 bytes when UTF8-encoded, and must not contain Carriage Return or Line Feed characters. Wildcard matching and regular expression matching are not supported. 
* Each exclude-prefix must omit the leading slash. For example, to exclude the object `s3://my-aws-bucket/logs/y=2015/requests.gz`, specify the exclude-prefix as `logs/y=2015/requests.gz`. * None of the exclude-prefix values can be empty, if specified. * Each exclude-prefix must exclude a distinct portion of the object namespace. No exclude-prefix may be a prefix of another exclude-prefix. * If include_prefixes is specified, then each exclude-prefix must start with the value of a path explicitly included by `include_prefixes`. The max size of `exclude_prefixes` is 1000. For more information, see [Filtering objects from transfers](/storage-transfer/docs/filtering-objects-from-transfers). "A String", ], @@ -1031,7 +1031,7 @@

Method Details

"maxTimeElapsedSinceLastModification": "A String", # Ensures that objects are not transferred if a specific maximum time has elapsed since the "last modification time". When a TransferOperation begins, objects with a "last modification time" are transferred only if the elapsed time between the start_time of the `TransferOperation`and the "last modification time" of the object is less than the value of max_time_elapsed_since_last_modification`. Objects that do not have a "last modification time" are also transferred. "minTimeElapsedSinceLastModification": "A String", # Ensures that objects are not transferred until a specific minimum time has elapsed after the "last modification time". When a TransferOperation begins, objects with a "last modification time" are transferred only if the elapsed time between the start_time of the `TransferOperation` and the "last modification time" of the object is equal to or greater than the value of min_time_elapsed_since_last_modification`. Objects that do not have a "last modification time" are also transferred. }, - "transferOptions": { # TransferOptions define the actions to be performed on objects in a transfer. # Specifies the actions to be performed on the object during replication. Delete options are not supported for replication and when specified, the request fails with an INVALID_ARGUMENT error. + "transferOptions": { # TransferOptions define the actions to be performed on objects in a transfer. # Specifies the metadata options to be applied during replication. Delete options are not supported. If a delete option is specified, the request fails with an INVALID_ARGUMENT error. "deleteObjectsFromSourceAfterTransfer": True or False, # Whether objects should be deleted from the source after they are transferred to the sink. **Note:** This option and delete_objects_unique_in_sink are mutually exclusive. "deleteObjectsUniqueInSink": True or False, # Whether objects that exist only in the sink should be deleted. 
**Note:** This option and delete_objects_from_source_after_transfer are mutually exclusive. "metadataOptions": { # Specifies the metadata options for running a transfer. # Represents the selected metadata options for a transfer job. @@ -1212,18 +1212,18 @@

Method Details

"pubsubTopic": "A String", # Required. The `Topic.name` of the Pub/Sub topic to which to publish notifications. Must be of the format: `projects/{project}/topics/{topic}`. Not matching this format results in an INVALID_ARGUMENT error. }, "projectId": "A String", # The ID of the Google Cloud project that owns the job. - "replicationSpec": { # Specifies the configuration for running a replication job. # Replication specification. - "gcsDataSink": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # Specifies cloud Storage data sink. + "replicationSpec": { # Specifies the configuration for a cross-bucket replication job. Cross-bucket replication copies new or updated objects from a source Cloud Storage bucket to a destination Cloud Storage bucket. Existing objects in the source bucket are not copied by a new cross-bucket replication job. # Replication specification. + "gcsDataSink": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # The Cloud Storage bucket to which to replicate objects. "bucketName": "A String", # Required. Cloud Storage bucket name. Must meet [Bucket Name Requirements](/storage/docs/naming#requirements). "managedFolderTransferEnabled": True or False, # Preview. Enables the transfer of managed folders between Cloud Storage buckets. Set this option on the gcs_data_source. If set to true: - Managed folders in the source bucket are transferred to the destination bucket. - Managed folders in the destination bucket are overwritten. Other OVERWRITE options are not supported. 
See [Transfer Cloud Storage managed folders](/storage-transfer/docs/managed-folders). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. The root path value must meet [Object Name Requirements](/storage/docs/naming#objectnames). }, - "gcsDataSource": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # Specifies cloud Storage data source. + "gcsDataSource": { # In a GcsData resource, an object's name is the Cloud Storage object's name and its "last modification time" refers to the object's `updated` property of Cloud Storage objects, which changes when the content or the metadata of the object is updated. # The Cloud Storage bucket from which to replicate objects. "bucketName": "A String", # Required. Cloud Storage bucket name. Must meet [Bucket Name Requirements](/storage/docs/naming#requirements). "managedFolderTransferEnabled": True or False, # Preview. Enables the transfer of managed folders between Cloud Storage buckets. Set this option on the gcs_data_source. If set to true: - Managed folders in the source bucket are transferred to the destination bucket. - Managed folders in the destination bucket are overwritten. Other OVERWRITE options are not supported. See [Transfer Cloud Storage managed folders](/storage-transfer/docs/managed-folders). "path": "A String", # Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'. The root path value must meet [Object Name Requirements](/storage/docs/naming#objectnames). 
}, - "objectConditions": { # Conditions that determine which objects are transferred. Applies only to Cloud Data Sources such as S3, Azure, and Cloud Storage. The "last modification time" refers to the time of the last change to the object's content or metadata — specifically, this is the `updated` property of Cloud Storage objects, the `LastModified` field of S3 objects, and the `Last-Modified` header of Azure blobs. Transfers with a PosixFilesystem source or destination don't support `ObjectConditions`. # Specifies the object conditions to only include objects that satisfy these conditions in the set of data source objects. Object conditions based on objects' "last modification time" do not exclude objects in a data sink. + "objectConditions": { # Conditions that determine which objects are transferred. Applies only to Cloud Data Sources such as S3, Azure, and Cloud Storage. The "last modification time" refers to the time of the last change to the object's content or metadata — specifically, this is the `updated` property of Cloud Storage objects, the `LastModified` field of S3 objects, and the `Last-Modified` header of Azure blobs. Transfers with a PosixFilesystem source or destination don't support `ObjectConditions`. # Object conditions that determine which objects are transferred. For replication jobs, only `include_prefixes` and `exclude_prefixes` are supported. "excludePrefixes": [ # If you specify `exclude_prefixes`, Storage Transfer Service uses the items in the `exclude_prefixes` array to determine which objects to exclude from a transfer. Objects must not start with one of the matching `exclude_prefixes` for inclusion in a transfer. The following are requirements of `exclude_prefixes`: * Each exclude-prefix can contain any sequence of Unicode characters, to a max length of 1024 bytes when UTF8-encoded, and must not contain Carriage Return or Line Feed characters. Wildcard matching and regular expression matching are not supported. 
* Each exclude-prefix must omit the leading slash. For example, to exclude the object `s3://my-aws-bucket/logs/y=2015/requests.gz`, specify the exclude-prefix as `logs/y=2015/requests.gz`. * None of the exclude-prefix values can be empty, if specified. * Each exclude-prefix must exclude a distinct portion of the object namespace. No exclude-prefix may be a prefix of another exclude-prefix. * If include_prefixes is specified, then each exclude-prefix must start with the value of a path explicitly included by `include_prefixes`. The max size of `exclude_prefixes` is 1000. For more information, see [Filtering objects from transfers](/storage-transfer/docs/filtering-objects-from-transfers). "A String", ], @@ -1235,7 +1235,7 @@

Method Details

"maxTimeElapsedSinceLastModification": "A String", # Ensures that objects are not transferred if a specific maximum time has elapsed since the "last modification time". When a TransferOperation begins, objects with a "last modification time" are transferred only if the elapsed time between the start_time of the `TransferOperation`and the "last modification time" of the object is less than the value of max_time_elapsed_since_last_modification`. Objects that do not have a "last modification time" are also transferred. "minTimeElapsedSinceLastModification": "A String", # Ensures that objects are not transferred until a specific minimum time has elapsed after the "last modification time". When a TransferOperation begins, objects with a "last modification time" are transferred only if the elapsed time between the start_time of the `TransferOperation` and the "last modification time" of the object is equal to or greater than the value of min_time_elapsed_since_last_modification`. Objects that do not have a "last modification time" are also transferred. }, - "transferOptions": { # TransferOptions define the actions to be performed on objects in a transfer. # Specifies the actions to be performed on the object during replication. Delete options are not supported for replication and when specified, the request fails with an INVALID_ARGUMENT error. + "transferOptions": { # TransferOptions define the actions to be performed on objects in a transfer. # Specifies the metadata options to be applied during replication. Delete options are not supported. If a delete option is specified, the request fails with an INVALID_ARGUMENT error. "deleteObjectsFromSourceAfterTransfer": True or False, # Whether objects should be deleted from the source after they are transferred to the sink. **Note:** This option and delete_objects_unique_in_sink are mutually exclusive. "deleteObjectsUniqueInSink": True or False, # Whether objects that exist only in the sink should be deleted. 
**Note:** This option and delete_objects_from_source_after_transfer are mutually exclusive. "metadataOptions": { # Specifies the metadata options for running a transfer. # Represents the selected metadata options for a transfer job. diff --git a/docs/dyn/texttospeech_v1.projects.locations.html b/docs/dyn/texttospeech_v1.projects.locations.html index f4c827c013f..9273db9be71 100644 --- a/docs/dyn/texttospeech_v1.projects.locations.html +++ b/docs/dyn/texttospeech_v1.projects.locations.html @@ -112,6 +112,15 @@

Method Details

"volumeGainDb": 3.14, # Optional. Input only. Volume gain (in dB) of the normal native volume supported by the specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB) will play at approximately half the amplitude of the normal native signal amplitude. A value of +6.0 (dB) will play at approximately twice the amplitude of the normal native signal amplitude. Strongly recommend not to exceed +10 (dB) as there's usually no effective increase in loudness for any value greater than that. }, "input": { # Contains text input to be synthesized. Either `text` or `ssml` must be supplied. Supplying both or neither returns google.rpc.Code.INVALID_ARGUMENT. The input size is limited to 5000 bytes. # Required. The Synthesizer requires either plain text or SSML as input. + "customPronunciations": { # A collection of pronunciation customizations. # Optional. The pronunciation customizations to be applied to the input. If this is set, the input will be synthesized using the given pronunciation customizations. The initial support will be for EFIGS (English, French, Italian, German, Spanish) languages, as provided in VoiceSelectionParams. Journey and Instant Clone voices are not supported yet. In order to customize the pronunciation of a phrase, there must be an exact match of the phrase in the input types. If using SSML, the phrase must not be inside a phoneme tag (entirely or partially). + "pronunciations": [ # The pronunciation customizations to be applied. + { # Pronunciation customization for a phrase. + "phoneticEncoding": "A String", # The phonetic encoding of the phrase. + "phrase": "A String", # The phrase to which the customization will be applied. The phrase can be multiple words (in the case of proper nouns etc), but should not span to a whole sentence. + "pronunciation": "A String", # The pronunciation of the phrase. This must be in the phonetic encoding specified above. 
+ }, + ], + }, "ssml": "A String", # The SSML document to be synthesized. The SSML document must be valid and well-formed. Otherwise the RPC will fail and return google.rpc.Code.INVALID_ARGUMENT. For more information, see [SSML](https://cloud.google.com/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, @@ -124,6 +133,9 @@

Method Details

"languageCode": "A String", # Required. The language (and potentially also the region) of the voice expressed as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g. "en-US". This should not include a script tag (e.g. use "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred from the input provided in the SynthesisInput. The TTS service will use this parameter to help choose an appropriate voice. Note that the TTS service may choose a voice with a slightly different language code than the one selected; it may substitute a different region (e.g. using en-US rather than en-CA if there isn't a Canadian voice available), or even a different language, e.g. using "nb" (Norwegian Bokmal) instead of "no" (Norwegian)". "name": "A String", # The name of the voice. If both the name and the gender are not set, the service will choose a voice based on the other parameters such as language_code. "ssmlGender": "A String", # The preferred gender of the voice. If not set, the service will choose a voice based on the other parameters such as language_code and name. Note that this is only a preference, not requirement; if a voice of the appropriate gender is not available, the synthesizer should substitute a voice with a different gender rather than failing the request. + "voiceClone": { # The configuration of Voice Clone feature. # Optional. The configuration for a voice clone. If [VoiceCloneParams.voice_clone_key] is set, the service will choose the voice clone matching the specified configuration. + "voiceCloningKey": "A String", # Required. Created by GenerateVoiceCloningKey. + }, }, } diff --git a/docs/dyn/texttospeech_v1.text.html b/docs/dyn/texttospeech_v1.text.html index 35b6c6f3635..5aaf33d9e1b 100644 --- a/docs/dyn/texttospeech_v1.text.html +++ b/docs/dyn/texttospeech_v1.text.html @@ -95,6 +95,9 @@

Method Details

The object takes the form of: { # The top-level message sent by the client for the `SynthesizeSpeech` method. + "advancedVoiceOptions": { # Used for advanced voice options. # Advanced voice options. + "lowLatencyJourneySynthesis": True or False, # Only for Journey voices. If false, the synthesis will be context aware and have higher latency. + }, "audioConfig": { # Description of audio data to be synthesized. # Required. The configuration of the synthesized audio. "audioEncoding": "A String", # Required. The format of the audio byte stream. "effectsProfileId": [ # Optional. Input only. An identifier which selects 'audio effects' profiles that are applied on (post synthesized) text to speech. Effects are applied on top of each other in the order they are given. See [audio profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for current supported profile ids. @@ -106,6 +109,15 @@

Method Details

"volumeGainDb": 3.14, # Optional. Input only. Volume gain (in dB) of the normal native volume supported by the specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB) will play at approximately half the amplitude of the normal native signal amplitude. A value of +6.0 (dB) will play at approximately twice the amplitude of the normal native signal amplitude. Strongly recommend not to exceed +10 (dB) as there's usually no effective increase in loudness for any value greater than that. }, "input": { # Contains text input to be synthesized. Either `text` or `ssml` must be supplied. Supplying both or neither returns google.rpc.Code.INVALID_ARGUMENT. The input size is limited to 5000 bytes. # Required. The Synthesizer requires either plain text or SSML as input. + "customPronunciations": { # A collection of pronunciation customizations. # Optional. The pronunciation customizations to be applied to the input. If this is set, the input will be synthesized using the given pronunciation customizations. The initial support will be for EFIGS (English, French, Italian, German, Spanish) languages, as provided in VoiceSelectionParams. Journey and Instant Clone voices are not supported yet. In order to customize the pronunciation of a phrase, there must be an exact match of the phrase in the input types. If using SSML, the phrase must not be inside a phoneme tag (entirely or partially). + "pronunciations": [ # The pronunciation customizations to be applied. + { # Pronunciation customization for a phrase. + "phoneticEncoding": "A String", # The phonetic encoding of the phrase. + "phrase": "A String", # The phrase to which the customization will be applied. The phrase can be multiple words (in the case of proper nouns etc), but should not span to a whole sentence. + "pronunciation": "A String", # The pronunciation of the phrase. This must be in the phonetic encoding specified above. 
+ }, + ], + }, "ssml": "A String", # The SSML document to be synthesized. The SSML document must be valid and well-formed. Otherwise the RPC will fail and return google.rpc.Code.INVALID_ARGUMENT. For more information, see [SSML](https://cloud.google.com/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, @@ -117,6 +129,9 @@

Method Details

"languageCode": "A String", # Required. The language (and potentially also the region) of the voice expressed as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g. "en-US". This should not include a script tag (e.g. use "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred from the input provided in the SynthesisInput. The TTS service will use this parameter to help choose an appropriate voice. Note that the TTS service may choose a voice with a slightly different language code than the one selected; it may substitute a different region (e.g. using en-US rather than en-CA if there isn't a Canadian voice available), or even a different language, e.g. using "nb" (Norwegian Bokmal) instead of "no" (Norwegian)". "name": "A String", # The name of the voice. If both the name and the gender are not set, the service will choose a voice based on the other parameters such as language_code. "ssmlGender": "A String", # The preferred gender of the voice. If not set, the service will choose a voice based on the other parameters such as language_code and name. Note that this is only a preference, not requirement; if a voice of the appropriate gender is not available, the synthesizer should substitute a voice with a different gender rather than failing the request. + "voiceClone": { # The configuration of Voice Clone feature. # Optional. The configuration for a voice clone. If [VoiceCloneParams.voice_clone_key] is set, the service will choose the voice clone matching the specified configuration. + "voiceCloningKey": "A String", # Required. Created by GenerateVoiceCloningKey. + }, }, } diff --git a/docs/dyn/texttospeech_v1.voices.html b/docs/dyn/texttospeech_v1.voices.html index be8c030ae25..34042916083 100644 --- a/docs/dyn/texttospeech_v1.voices.html +++ b/docs/dyn/texttospeech_v1.voices.html @@ -77,6 +77,9 @@

Instance Methods

close()

Close httplib2 connections.

+

+ generateVoiceCloningKey(body=None, x__xgafv=None)

+

Generates voice clone key given a short voice prompt. This method validates the voice prompts with a series of checks against the voice talent statement to verify the voice clone is safe to generate.

list(languageCode=None, x__xgafv=None)

Returns a list of Voice supported for synthesis.

@@ -86,6 +89,46 @@

Method Details

Close httplib2 connections.
+
+ generateVoiceCloningKey(body=None, x__xgafv=None) +
Generates voice clone key given a short voice prompt. This method validates the voice prompts with a series of checks against the voice talent statement to verify the voice clone is safe to generate.
+
+Args:
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for the `GenerateVoiceCloningKey` method.
+  "consentScript": "A String", # Required. The script used for the voice talent statement. The script will be provided to the caller through other channels. It must be returned unchanged in this field.
+  "languageCode": "A String", # Required. The language of the supplied audio as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. Example: "en-US". See [Language Support](https://cloud.google.com/speech-to-text/docs/languages) for a list of the currently supported language codes.
+  "referenceAudio": { # Holds audio content and config. # Required. The training audio used to create voice clone. This is currently limited to LINEAR16 PCM WAV files mono audio with 24khz sample rate. This needs to be specified in [InputAudio.audio_config], other values will be explicitly rejected.
+    "audioConfig": { # Description of inputted audio data. # Required. Provides information that specifies how to process content.
+      "audioEncoding": "A String", # Required. The format of the audio byte stream.
+      "sampleRateHertz": 42, # Required. The sample rate (in hertz) for this audio.
+    },
+    "content": "A String", # Required. The audio data bytes encoded as specified in `InputAudioConfig`. Note: as with all bytes fields, proto buffers use a pure binary representation, whereas JSON representations use base64. Audio samples should be between 5-25 seconds in length.
+  },
+  "voiceTalentConsent": { # Holds audio content and config. # Required. The voice talent audio used to verify consent to voice clone.
+    "audioConfig": { # Description of inputted audio data. # Required. Provides information that specifies how to process content.
+      "audioEncoding": "A String", # Required. The format of the audio byte stream.
+      "sampleRateHertz": 42, # Required. The sample rate (in hertz) for this audio.
+    },
+    "content": "A String", # Required. The audio data bytes encoded as specified in `InputAudioConfig`. Note: as with all bytes fields, proto buffers use a pure binary representation, whereas JSON representations use base64. Audio samples should be between 5-25 seconds in length.
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for the `GenerateVoiceCloningKey` method.
+  "voiceCloningKey": "A String", # The voice clone key. Use it in the SynthesizeSpeechRequest by setting [voice.voice_clone.voice_cloning_key].
+}
+
+
list(languageCode=None, x__xgafv=None)
Returns a list of Voice supported for synthesis.
diff --git a/docs/dyn/texttospeech_v1beta1.projects.locations.html b/docs/dyn/texttospeech_v1beta1.projects.locations.html
index 00304563c69..995f17cb7f4 100644
--- a/docs/dyn/texttospeech_v1beta1.projects.locations.html
+++ b/docs/dyn/texttospeech_v1beta1.projects.locations.html
@@ -112,6 +112,15 @@ 

Method Details

"volumeGainDb": 3.14, # Optional. Input only. Volume gain (in dB) of the normal native volume supported by the specific voice, in the range [-96.0, 16.0]. If unset, or set to a value of 0.0 (dB), will play at normal native signal amplitude. A value of -6.0 (dB) will play at approximately half the amplitude of the normal native signal amplitude. A value of +6.0 (dB) will play at approximately twice the amplitude of the normal native signal amplitude. Strongly recommend not to exceed +10 (dB) as there's usually no effective increase in loudness for any value greater than that. }, "input": { # Contains text input to be synthesized. Either `text` or `ssml` must be supplied. Supplying both or neither returns google.rpc.Code.INVALID_ARGUMENT. The input size is limited to 5000 bytes. # Required. The Synthesizer requires either plain text or SSML as input. + "customPronunciations": { # A collection of pronunciation customizations. # Optional. The pronunciation customizations to be applied to the input. If this is set, the input will be synthesized using the given pronunciation customizations. The initial support will be for EFIGS (English, French, Italian, German, Spanish) languages, as provided in VoiceSelectionParams. Journey and Instant Clone voices are not supported yet. In order to customize the pronunciation of a phrase, there must be an exact match of the phrase in the input types. If using SSML, the phrase must not be inside a phoneme tag (entirely or partially). + "pronunciations": [ # The pronunciation customizations to be applied. + { # Pronunciation customization for a phrase. + "phoneticEncoding": "A String", # The phonetic encoding of the phrase. + "phrase": "A String", # The phrase to which the customization will be applied. The phrase can be multiple words (in the case of proper nouns etc), but should not span to a whole sentence. + "pronunciation": "A String", # The pronunciation of the phrase. This must be in the phonetic encoding specified above. 
+ }, + ], + }, "ssml": "A String", # The SSML document to be synthesized. The SSML document must be valid and well-formed. Otherwise the RPC will fail and return google.rpc.Code.INVALID_ARGUMENT. For more information, see [SSML](https://cloud.google.com/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, @@ -124,6 +133,9 @@

Method Details

"languageCode": "A String", # Required. The language (and potentially also the region) of the voice expressed as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g. "en-US". This should not include a script tag (e.g. use "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred from the input provided in the SynthesisInput. The TTS service will use this parameter to help choose an appropriate voice. Note that the TTS service may choose a voice with a slightly different language code than the one selected; it may substitute a different region (e.g. using en-US rather than en-CA if there isn't a Canadian voice available), or even a different language, e.g. using "nb" (Norwegian Bokmal) instead of "no" (Norwegian)". "name": "A String", # The name of the voice. If both the name and the gender are not set, the service will choose a voice based on the other parameters such as language_code. "ssmlGender": "A String", # The preferred gender of the voice. If not set, the service will choose a voice based on the other parameters such as language_code and name. Note that this is only a preference, not requirement; if a voice of the appropriate gender is not available, the synthesizer should substitute a voice with a different gender rather than failing the request. + "voiceClone": { # The configuration of Voice Clone feature. # Optional. The configuration for a voice clone. If [VoiceCloneParams.voice_clone_key] is set, the service will choose the voice clone matching the specified configuration. + "voiceCloningKey": "A String", # Required. Created by GenerateVoiceCloningKey. + }, }, } diff --git a/docs/dyn/texttospeech_v1beta1.text.html b/docs/dyn/texttospeech_v1beta1.text.html index 12235c9a4f6..6dbab38493e 100644 --- a/docs/dyn/texttospeech_v1beta1.text.html +++ b/docs/dyn/texttospeech_v1beta1.text.html @@ -95,6 +95,9 @@

Method Details

The object takes the form of: { # The top-level message sent by the client for the `SynthesizeSpeech` method. + "advancedVoiceOptions": { # Used for advanced voice options. # Advanced voice options. + "lowLatencyJourneySynthesis": True or False, # Only for Journey voices. If false, the synthesis will be context aware and have higher latency. + }, "audioConfig": { # Description of audio data to be synthesized. # Required. The configuration of the synthesized audio. "audioEncoding": "A String", # Required. The format of the audio byte stream. "effectsProfileId": [ # Optional. Input only. An identifier which selects 'audio effects' profiles that are applied on (post synthesized) text to speech. Effects are applied on top of each other in the order they are given. See [audio profiles](https://cloud.google.com/text-to-speech/docs/audio-profiles) for current supported profile ids. @@ -109,6 +112,15 @@

Method Details

"A String", ], "input": { # Contains text input to be synthesized. Either `text` or `ssml` must be supplied. Supplying both or neither returns google.rpc.Code.INVALID_ARGUMENT. The input size is limited to 5000 bytes. # Required. The Synthesizer requires either plain text or SSML as input. + "customPronunciations": { # A collection of pronunciation customizations. # Optional. The pronunciation customizations to be applied to the input. If this is set, the input will be synthesized using the given pronunciation customizations. The initial support will be for EFIGS (English, French, Italian, German, Spanish) languages, as provided in VoiceSelectionParams. Journey and Instant Clone voices are not supported yet. In order to customize the pronunciation of a phrase, there must be an exact match of the phrase in the input types. If using SSML, the phrase must not be inside a phoneme tag (entirely or partially). + "pronunciations": [ # The pronunciation customizations to be applied. + { # Pronunciation customization for a phrase. + "phoneticEncoding": "A String", # The phonetic encoding of the phrase. + "phrase": "A String", # The phrase to which the customization will be applied. The phrase can be multiple words (in the case of proper nouns etc), but should not span to a whole sentence. + "pronunciation": "A String", # The pronunciation of the phrase. This must be in the phonetic encoding specified above. + }, + ], + }, "ssml": "A String", # The SSML document to be synthesized. The SSML document must be valid and well-formed. Otherwise the RPC will fail and return google.rpc.Code.INVALID_ARGUMENT. For more information, see [SSML](https://cloud.google.com/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, @@ -120,6 +132,9 @@

Method Details

"languageCode": "A String", # Required. The language (and potentially also the region) of the voice expressed as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g. "en-US". This should not include a script tag (e.g. use "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred from the input provided in the SynthesisInput. The TTS service will use this parameter to help choose an appropriate voice. Note that the TTS service may choose a voice with a slightly different language code than the one selected; it may substitute a different region (e.g. using en-US rather than en-CA if there isn't a Canadian voice available), or even a different language, e.g. using "nb" (Norwegian Bokmal) instead of "no" (Norwegian)". "name": "A String", # The name of the voice. If both the name and the gender are not set, the service will choose a voice based on the other parameters such as language_code. "ssmlGender": "A String", # The preferred gender of the voice. If not set, the service will choose a voice based on the other parameters such as language_code and name. Note that this is only a preference, not requirement; if a voice of the appropriate gender is not available, the synthesizer should substitute a voice with a different gender rather than failing the request. + "voiceClone": { # The configuration of Voice Clone feature. # Optional. The configuration for a voice clone. If [VoiceCloneParams.voice_clone_key] is set, the service will choose the voice clone matching the specified configuration. + "voiceCloningKey": "A String", # Required. Created by GenerateVoiceCloningKey. + }, }, } diff --git a/docs/dyn/texttospeech_v1beta1.voices.html b/docs/dyn/texttospeech_v1beta1.voices.html index 709d6ed2f18..08d0b29d965 100644 --- a/docs/dyn/texttospeech_v1beta1.voices.html +++ b/docs/dyn/texttospeech_v1beta1.voices.html @@ -77,6 +77,9 @@

Instance Methods

close()

Close httplib2 connections.

+

+ generateVoiceCloningKey(body=None, x__xgafv=None)

+

Generates voice clone key given a short voice prompt. This method validates the voice prompts with a series of checks against the voice talent statement to verify the voice clone is safe to generate.

list(languageCode=None, x__xgafv=None)

Returns a list of Voice supported for synthesis.

@@ -86,6 +89,46 @@

Method Details

Close httplib2 connections.
+
+ generateVoiceCloningKey(body=None, x__xgafv=None) +
Generates voice clone key given a short voice prompt. This method validates the voice prompts with a series of checks against the voice talent statement to verify the voice clone is safe to generate.
+
+Args:
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for the `GenerateVoiceCloningKey` method.
+  "consentScript": "A String", # Required. The script used for the voice talent statement. The script will be provided to the caller through other channels. It must be returned unchanged in this field.
+  "languageCode": "A String", # Required. The language of the supplied audio as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. Example: "en-US". See [Language Support](https://cloud.google.com/speech-to-text/docs/languages) for a list of the currently supported language codes.
+  "referenceAudio": { # Holds audio content and config. # Required. The training audio used to create voice clone. This is currently limited to LINEAR16 PCM WAV files mono audio with 24khz sample rate. This needs to be specified in [InputAudio.audio_config], other values will be explicitly rejected.
+    "audioConfig": { # Description of inputted audio data. # Required. Provides information that specifies how to process content.
+      "audioEncoding": "A String", # Required. The format of the audio byte stream.
+      "sampleRateHertz": 42, # Required. The sample rate (in hertz) for this audio.
+    },
+    "content": "A String", # Required. The audio data bytes encoded as specified in `InputAudioConfig`. Note: as with all bytes fields, proto buffers use a pure binary representation, whereas JSON representations use base64. Audio samples should be between 5-25 seconds in length.
+  },
+  "voiceTalentConsent": { # Holds audio content and config. # Required. The voice talent audio used to verify consent to voice clone.
+    "audioConfig": { # Description of inputted audio data. # Required. Provides information that specifies how to process content.
+      "audioEncoding": "A String", # Required. The format of the audio byte stream.
+      "sampleRateHertz": 42, # Required. The sample rate (in hertz) for this audio.
+    },
+    "content": "A String", # Required. The audio data bytes encoded as specified in `InputAudioConfig`. Note: as with all bytes fields, proto buffers use a pure binary representation, whereas JSON representations use base64. Audio samples should be between 5-25 seconds in length.
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for the `GenerateVoiceCloningKey` method.
+  "voiceCloningKey": "A String", # The voice clone key. Use it in the SynthesizeSpeechRequest by setting [voice.voice_clone.voice_cloning_key].
+}
+
+
list(languageCode=None, x__xgafv=None)
Returns a list of Voice supported for synthesis.
diff --git a/docs/dyn/workflows_v1.projects.locations.workflows.html b/docs/dyn/workflows_v1.projects.locations.workflows.html
index a3a687d1722..5ce97d56cf2 100644
--- a/docs/dyn/workflows_v1.projects.locations.workflows.html
+++ b/docs/dyn/workflows_v1.projects.locations.workflows.html
@@ -116,7 +116,7 @@ 

Method Details

body: object, The request body. The object takes the form of: -{ # LINT.IfChange Workflow program to be executed by Workflows. +{ # Workflow program to be executed by Workflows. "allKmsKeys": [ # Output only. A list of all KMS crypto keys used to encrypt or decrypt the data associated with the workflow. "A String", ], @@ -228,7 +228,7 @@

Method Details

Returns: An object of the form: - { # LINT.IfChange Workflow program to be executed by Workflows. + { # Workflow program to be executed by Workflows. "allKmsKeys": [ # Output only. A list of all KMS crypto keys used to encrypt or decrypt the data associated with the workflow. "A String", ], @@ -285,7 +285,7 @@

Method Details

"A String", ], "workflows": [ # The workflows that match the request. - { # LINT.IfChange Workflow program to be executed by Workflows. + { # Workflow program to be executed by Workflows. "allKmsKeys": [ # Output only. A list of all KMS crypto keys used to encrypt or decrypt the data associated with the workflow. "A String", ], @@ -339,7 +339,7 @@

Method Details

{ # Response for the ListWorkflowRevisions method. "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages. "workflows": [ # The revisions of the workflow, ordered in reverse chronological order. - { # LINT.IfChange Workflow program to be executed by Workflows. + { # Workflow program to be executed by Workflows. "allKmsKeys": [ # Output only. A list of all KMS crypto keys used to encrypt or decrypt the data associated with the workflow. "A String", ], @@ -411,7 +411,7 @@

Method Details

body: object, The request body. The object takes the form of: -{ # LINT.IfChange Workflow program to be executed by Workflows. +{ # Workflow program to be executed by Workflows. "allKmsKeys": [ # Output only. A list of all KMS crypto keys used to encrypt or decrypt the data associated with the workflow. "A String", ], diff --git a/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json b/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json index a9e53dd8760..4ac7434d43f 100644 --- a/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json +++ b/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json @@ -3115,7 +3115,7 @@ } } }, -"revision": "20240708", +"revision": "20241002", "rootUrl": "https://adexchangebuyer.googleapis.com/", "schemas": { "AbsoluteDateRange": { @@ -6570,22 +6570,22 @@ "id": "TimeOfDay", "properties": { "hours": { -"description": "Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", +"description": "Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", "format": "int32", "type": "integer" }, "minutes": { -"description": "Minutes of hour of day. Must be from 0 to 59.", +"description": "Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59.", "format": "int32", "type": "integer" }, "nanos": { -"description": "Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.", +"description": "Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999.", "format": "int32", "type": "integer" }, "seconds": { -"description": "Seconds of minutes of the time. Must normally be from 0 to 59. 
An API may allow the value 60 if it allows leap-seconds.", +"description": "Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds.", "format": "int32", "type": "integer" } diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1.json index f9ab6fc08ee..022b161c846 100644 --- a/googleapiclient/discovery_cache/documents/aiplatform.v1.json +++ b/googleapiclient/discovery_cache/documents/aiplatform.v1.json @@ -6590,7 +6590,7 @@ ], "parameters": { "parent": { -"description": "Required. The resource name of the EntityType to create the batch of Features under. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`", +"description": "Required. The resource name of the EntityType/FeatureGroup to create the batch of Features under. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` `projects/{project}/locations/{location}/featureGroups/{feature_group}`", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/featurestores/[^/]+/entityTypes/[^/]+$", "required": true, @@ -17813,7 +17813,7 @@ } } }, -"revision": "20240923", +"revision": "20240925", "rootUrl": "https://aiplatform.googleapis.com/", "schemas": { "CloudAiLargeModelsVisionGenerateVideoResponse": { @@ -18607,11 +18607,11 @@ "type": "object" }, "GoogleCloudAiplatformV1BatchCreateFeaturesRequest": { -"description": "Request message for FeaturestoreService.BatchCreateFeatures.", +"description": "Request message for FeaturestoreService.BatchCreateFeatures. Request message for FeatureRegistryService.BatchCreateFeatures.", "id": "GoogleCloudAiplatformV1BatchCreateFeaturesRequest", "properties": { "requests": { -"description": "Required. The request message specifying the Features to create. 
All Features must be created under the same parent EntityType. The `parent` field in each child request message can be omitted. If `parent` is set in a child request, then the value must match the `parent` value in this request message.", +"description": "Required. The request message specifying the Features to create. All Features must be created under the same parent EntityType / FeatureGroup. The `parent` field in each child request message can be omitted. If `parent` is set in a child request, then the value must match the `parent` value in this request message.", "items": { "$ref": "GoogleCloudAiplatformV1CreateFeatureRequest" }, diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json index 2dfb8ac63c0..199f7a9ee92 100644 --- a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json @@ -8344,7 +8344,7 @@ ], "parameters": { "parent": { -"description": "Required. The resource name of the EntityType to create the batch of Features under. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`", +"description": "Required. The resource name of the EntityType/FeatureGroup to create the batch of Features under. 
Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` `projects/{project}/locations/{location}/featureGroups/{feature_group}`", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/featurestores/[^/]+/entityTypes/[^/]+$", "required": true, @@ -21202,7 +21202,7 @@ } } }, -"revision": "20240916", +"revision": "20240925", "rootUrl": "https://aiplatform.googleapis.com/", "schemas": { "CloudAiLargeModelsVisionGenerateVideoResponse": { @@ -22250,11 +22250,11 @@ "type": "object" }, "GoogleCloudAiplatformV1beta1BatchCreateFeaturesRequest": { -"description": "Request message for FeaturestoreService.BatchCreateFeatures.", +"description": "Request message for FeaturestoreService.BatchCreateFeatures. Request message for FeatureRegistryService.BatchCreateFeatures.", "id": "GoogleCloudAiplatformV1beta1BatchCreateFeaturesRequest", "properties": { "requests": { -"description": "Required. The request message specifying the Features to create. All Features must be created under the same parent EntityType. The `parent` field in each child request message can be omitted. If `parent` is set in a child request, then the value must match the `parent` value in this request message.", +"description": "Required. The request message specifying the Features to create. All Features must be created under the same parent EntityType / FeatureGroup. The `parent` field in each child request message can be omitted. 
If `parent` is set in a child request, then the value must match the `parent` value in this request message.", "items": { "$ref": "GoogleCloudAiplatformV1beta1CreateFeatureRequest" }, @@ -37743,7 +37743,7 @@ false "type": "number" }, "sourceUri": { -"description": "For vertex RagStore, if the file is imported from Cloud Storage or Google Drive, source_uri will be original file URI in Cloud Storage or Google Drive; if file is uploaded, source_uri will be file display name.", +"description": "If the file is imported from Cloud Storage or Google Drive, source_uri will be original file URI in Cloud Storage or Google Drive; if file is uploaded, source_uri will be file display name.", "type": "string" }, "sparseDistance": { diff --git a/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json b/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json index 758b71f084f..69e10f9ee08 100644 --- a/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json @@ -4886,7 +4886,7 @@ } } }, -"revision": "20240916", +"revision": "20240929", "rootUrl": "https://analyticsadmin.googleapis.com/", "schemas": { "GoogleAnalyticsAdminV1alphaAccessBetweenFilter": { @@ -6037,7 +6037,7 @@ "type": "string" }, "project": { -"description": "Immutable. The linked Google Cloud project resource name. Currently, this API always uses a project number, but may use project IDs in the future. Format: 'projects/{project number}' Example: 'projects/1234'", +"description": "Immutable. The linked Google Cloud project. When creating a BigQueryLink, you may provide this resource name using either a project number or project ID. Once this resource has been created, the returned project will always have a project that contains a project number. 
Format: 'projects/{project number}' Example: 'projects/1234'", "type": "string" }, "streamingExportEnabled": { diff --git a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json index e93213206d6..4b699ada776 100644 --- a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json +++ b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json @@ -4732,7 +4732,7 @@ } } }, -"revision": "20240917", +"revision": "20241003", "rootUrl": "https://androidpublisher.googleapis.com/", "schemas": { "Abi": { @@ -6227,17 +6227,6 @@ }, "type": "object" }, -"ExternalOfferInitialAcquisitionDetails": { -"description": "Details about the first time a user/device completed a transaction using external offers.", -"id": "ExternalOfferInitialAcquisitionDetails", -"properties": { -"externalTransactionId": { -"description": "Required. The external transaction id of the first completed purchase made by the user.", -"type": "string" -} -}, -"type": "object" -}, "ExternalSubscription": { "description": "Details of an external subscription.", "id": "ExternalSubscription", @@ -6279,10 +6268,6 @@ "description": "Output only. The current tax amount. This represents the current tax amount including any refunds that may have been applied to this transaction.", "readOnly": true }, -"externalOfferInitialAcquisitionDetails": { -"$ref": "ExternalOfferInitialAcquisitionDetails", -"description": "Optional. Details about the first time a user/device completed a transaction using external offers. Not required for transactions made using user choice billing or alternative billing only." -}, "externalTransactionId": { "description": "Output only. The id of this transaction. All transaction ids under the same package name must be unique. 
Set when creating the external transaction.", "readOnly": true, diff --git a/googleapiclient/discovery_cache/documents/apigee.v1.json b/googleapiclient/discovery_cache/documents/apigee.v1.json index bb461ae9018..7da2b10c9a2 100644 --- a/googleapiclient/discovery_cache/documents/apigee.v1.json +++ b/googleapiclient/discovery_cache/documents/apigee.v1.json @@ -227,6 +227,31 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, +"getControlPlaneAccess": { +"description": "Lists the service accounts allowed to access Apigee control plane directly for limited functionality. **Note**: Available to Apigee hybrid only.", +"flatPath": "v1/organizations/{organizationsId}/controlPlaneAccess", +"httpMethod": "GET", +"id": "apigee.organizations.getControlPlaneAccess", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. Resource name of the Control Plane Access. Use the following structure in your request: `organizations/{org}/controlPlaneAccess`", +"location": "path", +"pattern": "^organizations/[^/]+/controlPlaneAccess$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "GoogleCloudApigeeV1ControlPlaneAccess" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, "getDeployedIngressConfig": { "description": "Gets the deployed ingress configuration for an organization.", "flatPath": "v1/organizations/{organizationsId}/deployedIngressConfig", @@ -479,6 +504,40 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, +"updateControlPlaneAccess": { +"description": "Updates the permissions required to allow Apigee runtime-plane components access to the control plane. Currently, the permissions required are to: 1. Allow runtime components to publish analytics data to the control plane. 
**Note**: Available to Apigee hybrid only.", +"flatPath": "v1/organizations/{organizationsId}/controlPlaneAccess", +"httpMethod": "PATCH", +"id": "apigee.organizations.updateControlPlaneAccess", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Identifier. The resource name of the ControlPlaneAccess. Format: \"organizations/{org}/controlPlaneAccess\"", +"location": "path", +"pattern": "^organizations/[^/]+/controlPlaneAccess$", +"required": true, +"type": "string" +}, +"updateMask": { +"description": "List of fields to be updated. Fields that can be updated: synchronizer_identities, publisher_identities.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}", +"request": { +"$ref": "GoogleCloudApigeeV1ControlPlaneAccess" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, "updateSecuritySettings": { "description": "UpdateSecuritySettings updates the current security settings for API Security.", "flatPath": "v1/organizations/{organizationsId}/securitySettings", @@ -9503,6 +9562,163 @@ } } }, +"securityProfilesV2": { +"methods": { +"create": { +"description": "Create a security profile v2.", +"flatPath": "v1/organizations/{organizationsId}/securityProfilesV2", +"httpMethod": "POST", +"id": "apigee.organizations.securityProfilesV2.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. The parent resource name.", +"location": "path", +"pattern": "^organizations/[^/]+$", +"required": true, +"type": "string" +}, +"securityProfileV2Id": { +"description": "Required. 
The security profile id.", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+parent}/securityProfilesV2", +"request": { +"$ref": "GoogleCloudApigeeV1SecurityProfileV2" +}, +"response": { +"$ref": "GoogleCloudApigeeV1SecurityProfileV2" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Delete a security profile v2.", +"flatPath": "v1/organizations/{organizationsId}/securityProfilesV2/{securityProfilesV2Id}", +"httpMethod": "DELETE", +"id": "apigee.organizations.securityProfilesV2.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the security profile v2 to delete.", +"location": "path", +"pattern": "^organizations/[^/]+/securityProfilesV2/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "GoogleProtobufEmpty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Get a security profile v2.", +"flatPath": "v1/organizations/{organizationsId}/securityProfilesV2/{securityProfilesV2Id}", +"httpMethod": "GET", +"id": "apigee.organizations.securityProfilesV2.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The security profile id.", +"location": "path", +"pattern": "^organizations/[^/]+/securityProfilesV2/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "GoogleCloudApigeeV1SecurityProfileV2" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "List security profiles v2.", +"flatPath": "v1/organizations/{organizationsId}/securityProfilesV2", +"httpMethod": "GET", +"id": "apigee.organizations.securityProfilesV2.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"pageSize": { +"description": "Optional. 
The maximum number of profiles to return", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A page token, received from a previous `ListSecurityProfilesV2` call. Provide this to retrieve the subsequent page.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. For a specific organization, list of all the security profiles. Format: `organizations/{org}`", +"location": "path", +"pattern": "^organizations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/securityProfilesV2", +"response": { +"$ref": "GoogleCloudApigeeV1ListSecurityProfilesV2Response" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"patch": { +"description": "Update a security profile V2.", +"flatPath": "v1/organizations/{organizationsId}/securityProfilesV2/{securityProfilesV2Id}", +"httpMethod": "PATCH", +"id": "apigee.organizations.securityProfilesV2.patch", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Identifier. Name of the security profile v2 resource. Format: organizations/{org}/securityProfilesV2/{profile}", +"location": "path", +"pattern": "^organizations/[^/]+/securityProfilesV2/[^/]+$", +"required": true, +"type": "string" +}, +"updateMask": { +"description": "Required. 
The list of fields to update.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}", +"request": { +"$ref": "GoogleCloudApigeeV1SecurityProfileV2" +}, +"response": { +"$ref": "GoogleCloudApigeeV1SecurityProfileV2" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +}, "sharedflows": { "methods": { "create": { @@ -10157,7 +10373,7 @@ } } }, -"revision": "20240919", +"revision": "20241004", "rootUrl": "https://apigee.googleapis.com/", "schemas": { "EdgeConfigstoreBundleBadBundle": { @@ -11874,6 +12090,31 @@ }, "type": "object" }, +"GoogleCloudApigeeV1ControlPlaneAccess": { +"description": "ControlPlaneAccess is the request body and response body of UpdateControlPlaneAccess. and the response body of GetControlPlaneAccess. The input identities contains an array of service accounts to grant access to the respective control plane resource, with each service account specified using the following format: `serviceAccount:`***service-account-name***. The ***service-account-name*** is formatted like an email address. For example: `my-control-plane-service_account@my_project_id.iam.gserviceaccount.com` You might specify multiple service accounts, for example, if you have multiple environments and wish to assign a unique service account to each one.", +"id": "GoogleCloudApigeeV1ControlPlaneAccess", +"properties": { +"analyticsPublisherIdentities": { +"description": "Optional. Array of service accounts authorized to publish analytics data to the control plane (for the Message Processor component).", +"items": { +"type": "string" +}, +"type": "array" +}, +"name": { +"description": "Identifier. The resource name of the ControlPlaneAccess. Format: \"organizations/{org}/controlPlaneAccess\"", +"type": "string" +}, +"synchronizerIdentities": { +"description": "Optional. Array of service accounts to grant access to control plane resources (for the Synchronizer component). 
The service accounts must have **Apigee Synchronizer Manager** role. See also [Create service accounts](https://cloud.google.com/apigee/docs/hybrid/latest/sa-about#create-the-service-accounts).", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleCloudApigeeV1Credential": { "id": "GoogleCloudApigeeV1Credential", "properties": { @@ -14703,6 +14944,24 @@ }, "type": "object" }, +"GoogleCloudApigeeV1ListSecurityProfilesV2Response": { +"description": "Response for ListSecurityProfilesV2.", +"id": "GoogleCloudApigeeV1ListSecurityProfilesV2Response", +"properties": { +"nextPageToken": { +"description": "A token that can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", +"type": "string" +}, +"securityProfilesV2": { +"description": "List of security profiles in the organization.", +"items": { +"$ref": "GoogleCloudApigeeV1SecurityProfileV2" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleCloudApigeeV1ListSecurityReportsResponse": { "description": "The response for SecurityReports.", "id": "GoogleCloudApigeeV1ListSecurityReportsResponse", @@ -17344,6 +17603,68 @@ }, "type": "object" }, +"GoogleCloudApigeeV1SecurityProfileV2": { +"description": "Security profile for risk assessment version 2.", +"id": "GoogleCloudApigeeV1SecurityProfileV2", +"properties": { +"createTime": { +"description": "Output only. The time of the security profile creation.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"description": { +"description": "Optional. The description of the security profile.", +"type": "string" +}, +"googleDefined": { +"description": "Output only. Whether the security profile is google defined.", +"readOnly": true, +"type": "boolean" +}, +"name": { +"description": "Identifier. Name of the security profile v2 resource. 
Format: organizations/{org}/securityProfilesV2/{profile}", +"type": "string" +}, +"profileAssessmentConfigs": { +"additionalProperties": { +"$ref": "GoogleCloudApigeeV1SecurityProfileV2ProfileAssessmentConfig" +}, +"description": "Required. The configuration for each assessment in this profile. Key is the name/id of the assessment.", +"type": "object" +}, +"updateTime": { +"description": "Output only. The time of the security profile update.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudApigeeV1SecurityProfileV2ProfileAssessmentConfig": { +"description": "The configuration definition for a specific assessment.", +"id": "GoogleCloudApigeeV1SecurityProfileV2ProfileAssessmentConfig", +"properties": { +"weight": { +"description": "The weight of the assessment.", +"enum": [ +"WEIGHT_UNSPECIFIED", +"MINOR", +"MODERATE", +"MAJOR" +], +"enumDescriptions": [ +"The weight is unspecified.", +"The weight is minor.", +"The weight is moderate.", +"The weight is major." +], +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudApigeeV1SecurityReport": { "description": "SecurityReport saves all the information about the created security report.", "id": "GoogleCloudApigeeV1SecurityReport", diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1.json index 670939c1dcf..154dbfe25d9 100644 --- a/googleapiclient/discovery_cache/documents/artifactregistry.v1.json +++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1.json @@ -884,7 +884,7 @@ "attachments": { "methods": { "create": { -"description": "Creates an attachment. The returned Operation will finish once the attachment has been created. Its response will be the created Attachment.", +"description": "Creates an attachment. The returned Operation will finish once the attachment has been created. 
Its response will be the created attachment.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}/attachments", "httpMethod": "POST", "id": "artifactregistry.projects.locations.repositories.attachments.create", @@ -917,7 +917,7 @@ ] }, "delete": { -"description": "Deletes an attachment. The returned Operation will finish once the attachments has been deleted. It will not have any Operation metadata and will return a google.protobuf.Empty response.", +"description": "Deletes an attachment. The returned Operation will finish once the attachments has been deleted. It will not have any Operation metadata and will return a `google.protobuf.Empty` response.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}/attachments/{attachmentsId}", "httpMethod": "DELETE", "id": "artifactregistry.projects.locations.repositories.attachments.delete", @@ -968,7 +968,7 @@ ] }, "list": { -"description": "Lists repositories.", +"description": "Lists attachments.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}/attachments", "httpMethod": "GET", "id": "artifactregistry.projects.locations.repositories.attachments.list", @@ -1246,7 +1246,7 @@ ] }, "upload": { -"description": "Directly uploads a File to a repository. The returned Operation will complete once the resources are uploaded.", +"description": "Directly uploads a file to a repository. The returned Operation will complete once the resources are uploaded.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/repositories/{repositoriesId}/files:upload", "httpMethod": "POST", "id": "artifactregistry.projects.locations.repositories.files.upload", @@ -2106,7 +2106,7 @@ ], "parameters": { "name": { -"description": "The name of the version, for example: \"projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1\". 
If the package or version ID parts contain slashes, the slashes are escaped.", +"description": "The name of the version, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1`. If the package or version ID parts contain slashes, the slashes are escaped.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/repositories/[^/]+/packages/[^/]+/versions/[^/]+$", "required": true, @@ -2439,7 +2439,7 @@ } } }, -"revision": "20240923", +"revision": "20241001", "rootUrl": "https://artifactregistry.googleapis.com/", "schemas": { "AptArtifact": { @@ -2506,18 +2506,18 @@ "type": "object" }, "Attachment": { -"description": "An Attachment refers to additional metadata that can be attached to artifacts in ArtifactRegistry. An attachment consists of one or more files.", +"description": "An Attachment refers to additional metadata that can be attached to artifacts in Artifact Registry. An attachment consists of one or more files.", "id": "Attachment", "properties": { "annotations": { "additionalProperties": { "type": "string" }, -"description": "Optional. User annotations. These attributes can only be set and used by the user, and not by Artifact Registry. See https://google.aip.dev/128#annotations for more details such as format and size limitations. Client specified annotations.", +"description": "Optional. User annotations. These attributes can only be set and used by the user, and not by Artifact Registry. See https://google.aip.dev/128#annotations for more details such as format and size limitations.", "type": "object" }, "attachmentNamespace": { -"description": "The namespace this attachment belongs to. E.g. If an Attachment is created by artifact analysis, namespace is set to artifactanalysis.googleapis.com.", +"description": "The namespace this attachment belongs to. E.g. 
If an Attachment is created by artifact analysis, namespace is set to `artifactanalysis.googleapis.com`.", "type": "string" }, "createTime": { @@ -2527,7 +2527,7 @@ "type": "string" }, "files": { -"description": "Required. The files that blong to this Attachment. If the file ID part contains slashes, they are escaped. E.g. \"projects/p1/locations/us-central1/repositories/repo1/files/sha:\".", +"description": "Required. The files that belong to this attachment. If the file ID part contains slashes, they are escaped. E.g. `projects/p1/locations/us-central1/repositories/repo1/files/sha:`.", "items": { "type": "string" }, @@ -2538,7 +2538,7 @@ "type": "string" }, "ociVersionName": { -"description": "Output only. The name of the OCI version that this attachment created. Only populated for Docker attachments. E.g. \"projects/p1/locations/us-central1/repositories/repo1/packages/p1/versions/v1\".", +"description": "Output only. The name of the OCI version that this attachment created. Only populated for Docker attachments. E.g. `projects/p1/locations/us-central1/repositories/repo1/packages/p1/versions/v1`.", "readOnly": true, "type": "string" }, @@ -2547,7 +2547,7 @@ "type": "string" }, "type": { -"description": "Type of Attachment. E.g. application/vnd.spdx+jsonn", +"description": "Type of Attachment. E.g. `application/vnd.spdx+json`", "type": "string" }, "updateTime": { @@ -2724,7 +2724,7 @@ "id": "CommonRemoteRepository", "properties": { "uri": { -"description": "Required. A common public repository base for Remote Repository.", +"description": "Required. 
A common public repository base for remote repository.", "type": "string" } }, @@ -3938,12 +3938,6 @@ false }, "type": "object" }, -"PromoteArtifactMetadata": { -"description": "The metadata for promote artifact long running operation.", -"id": "PromoteArtifactMetadata", -"properties": {}, -"type": "object" -}, "PythonPackage": { "description": "PythonPackage represents a python artifact.", "id": "PythonPackage", @@ -4012,7 +4006,7 @@ false }, "commonRepository": { "$ref": "CommonRemoteRepository", -"description": "Common remote repository settings. Used as the RR upstream URL instead of Predefined and Custom remote repositories. UI and Gcloud will map all the new remote repositories to this field." +"description": "Common remote repository settings. Used as the RemoteRepository upstream URL instead of Predefined and Custom remote repositories. Google Cloud Console and Google Cloud CLI will map all the new remote repositories to this field." }, "description": { "description": "The description of the remote source.", @@ -4593,7 +4587,7 @@ false "type": "object" }, "name": { -"description": "The name of the version, for example: \"projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1\". If the package or version ID parts contain slashes, the slashes are escaped.", +"description": "The name of the version, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1`. 
If the package or version ID parts contain slashes, the slashes are escaped.", "type": "string" }, "relatedTags": { diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json index 51e36e9b4a7..5b0f8bda3df 100644 --- a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json @@ -1156,7 +1156,7 @@ } } }, -"revision": "20240903", +"revision": "20241001", "rootUrl": "https://artifactregistry.googleapis.com/", "schemas": { "Binding": { @@ -1674,7 +1674,7 @@ "type": "string" }, "name": { -"description": "The name of the version, for example: \"projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1\". If the package or version ID parts contain slashes, the slashes are escaped.", +"description": "The name of the version, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1`. If the package or version ID parts contain slashes, the slashes are escaped.", "type": "string" }, "relatedTags": { diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json index 8fc1252ffea..adbeb1e26c9 100644 --- a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json @@ -1424,7 +1424,7 @@ } } }, -"revision": "20240903", +"revision": "20241001", "rootUrl": "https://artifactregistry.googleapis.com/", "schemas": { "AptArtifact": { @@ -2307,7 +2307,7 @@ false "type": "object" }, "name": { -"description": "The name of the version, for example: \"projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1\". 
If the package or version ID parts contain slashes, the slashes are escaped.", +"description": "The name of the version, for example: `projects/p1/locations/us-central1/repositories/repo1/packages/pkg1/versions/art1`. If the package or version ID parts contain slashes, the slashes are escaped.", "type": "string" }, "relatedTags": { diff --git a/googleapiclient/discovery_cache/documents/authorizedbuyersmarketplace.v1.json b/googleapiclient/discovery_cache/documents/authorizedbuyersmarketplace.v1.json index c27ff921b7f..78163371a4b 100644 --- a/googleapiclient/discovery_cache/documents/authorizedbuyersmarketplace.v1.json +++ b/googleapiclient/discovery_cache/documents/authorizedbuyersmarketplace.v1.json @@ -1367,7 +1367,7 @@ } } }, -"revision": "20240708", +"revision": "20241002", "rootUrl": "https://authorizedbuyersmarketplace.googleapis.com/", "schemas": { "AcceptProposalRequest": { @@ -3111,22 +3111,22 @@ "id": "TimeOfDay", "properties": { "hours": { -"description": "Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", +"description": "Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", "format": "int32", "type": "integer" }, "minutes": { -"description": "Minutes of hour of day. Must be from 0 to 59.", +"description": "Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59.", "format": "int32", "type": "integer" }, "nanos": { -"description": "Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.", +"description": "Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999.", "format": "int32", "type": "integer" }, "seconds": { -"description": "Seconds of minutes of the time. Must normally be from 0 to 59. 
An API may allow the value 60 if it allows leap-seconds.", +"description": "Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds.", "format": "int32", "type": "integer" } diff --git a/googleapiclient/discovery_cache/documents/authorizedbuyersmarketplace.v1alpha.json b/googleapiclient/discovery_cache/documents/authorizedbuyersmarketplace.v1alpha.json index 3e368512dfa..4ff4282fdc9 100644 --- a/googleapiclient/discovery_cache/documents/authorizedbuyersmarketplace.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/authorizedbuyersmarketplace.v1alpha.json @@ -1550,7 +1550,7 @@ } } }, -"revision": "20240924", +"revision": "20241002", "rootUrl": "https://authorizedbuyersmarketplace.googleapis.com/", "schemas": { "AcceptProposalRequest": { @@ -3366,22 +3366,22 @@ "id": "TimeOfDay", "properties": { "hours": { -"description": "Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", +"description": "Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", "format": "int32", "type": "integer" }, "minutes": { -"description": "Minutes of hour of day. Must be from 0 to 59.", +"description": "Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59.", "format": "int32", "type": "integer" }, "nanos": { -"description": "Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.", +"description": "Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999.", "format": "int32", "type": "integer" }, "seconds": { -"description": "Seconds of minutes of the time. Must normally be from 0 to 59. 
An API may allow the value 60 if it allows leap-seconds.", +"description": "Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds.", "format": "int32", "type": "integer" } diff --git a/googleapiclient/discovery_cache/documents/backupdr.v1.json b/googleapiclient/discovery_cache/documents/backupdr.v1.json index 4597576ffaa..d8b77de3dc3 100644 --- a/googleapiclient/discovery_cache/documents/backupdr.v1.json +++ b/googleapiclient/discovery_cache/documents/backupdr.v1.json @@ -1658,7 +1658,7 @@ } } }, -"revision": "20240918", +"revision": "20240920", "rootUrl": "https://backupdr.googleapis.com/", "schemas": { "AbandonBackupRequest": { @@ -3390,6 +3390,25 @@ }, "type": "object" }, +"GcpResource": { +"description": "Minimum details to identify a Google Cloud resource", +"id": "GcpResource", +"properties": { +"gcpResourcename": { +"description": "Name of the Google Cloud resource.", +"type": "string" +}, +"location": { +"description": "Location of the resource: //\"global\"/\"unspecified\".", +"type": "string" +}, +"type": { +"description": "Type of the resource. Use the Unified Resource Type, eg. compute.googleapis.com/Instance.", +"type": "string" +} +}, +"type": "object" +}, "GuestOsFeature": { "description": "Feature type of the Guest OS.", "id": "GuestOsFeature", @@ -4455,6 +4474,17 @@ }, "type": "object" }, +"RestoreBackupResponse": { +"description": "Response message for restoring from a Backup.", +"id": "RestoreBackupResponse", +"properties": { +"targetResource": { +"$ref": "TargetResource", +"description": "Details of the target resource created/modified as part of restore." 
+} +}, +"type": "object" +}, "RuleConfigInfo": { "description": "Message for rules config info.", "id": "RuleConfigInfo", @@ -4664,6 +4694,12 @@ }, "type": "object" }, +"SetInternalStatusResponse": { +"description": "Response message from SetStatusInternal method.", +"id": "SetInternalStatusResponse", +"properties": {}, +"type": "object" +}, "SpannerLocation": { "id": "SpannerLocation", "properties": { @@ -4841,6 +4877,17 @@ }, "type": "object" }, +"TargetResource": { +"description": "Details of the target resource created/modified as part of restore.", +"id": "TargetResource", +"properties": { +"gcpResource": { +"$ref": "GcpResource", +"description": "Details of the native Google Cloud resource created as part of restore." +} +}, +"type": "object" +}, "TenantProjectProxy": { "id": "TenantProjectProxy", "properties": { diff --git a/googleapiclient/discovery_cache/documents/batch.v1.json b/googleapiclient/discovery_cache/documents/batch.v1.json index 34180e1f9cf..86bb093db4d 100644 --- a/googleapiclient/discovery_cache/documents/batch.v1.json +++ b/googleapiclient/discovery_cache/documents/batch.v1.json @@ -561,7 +561,7 @@ } } }, -"revision": "20240919", +"revision": "20240925", "rootUrl": "https://batch.googleapis.com/", "schemas": { "Accelerator": { @@ -1264,7 +1264,7 @@ "type": "string" }, "reservation": { -"description": "Optional. If specified, VMs will consume only the specified reservation. If not specified (default), VMs will consume any applicable reservation.", +"description": "Optional. If specified, VMs will consume only the specified reservation. If not specified (default), VMs will consume any applicable reservation. 
Additionally, VMs will not consume any reservation if \"NO_RESERVATION\" is specified.", "type": "string" } }, diff --git a/googleapiclient/discovery_cache/documents/beyondcorp.v1.json b/googleapiclient/discovery_cache/documents/beyondcorp.v1.json index 343cd21f265..9ec2b9dfa25 100644 --- a/googleapiclient/discovery_cache/documents/beyondcorp.v1.json +++ b/googleapiclient/discovery_cache/documents/beyondcorp.v1.json @@ -1713,6 +1713,95 @@ } } }, +"global": { +"resources": { +"securityGateways": { +"resources": { +"applications": { +"methods": { +"create": { +"description": "Creates a new Application in a given project and location.", +"flatPath": "v1/projects/{projectsId}/locations/global/securityGateways/{securityGatewaysId}/applications", +"httpMethod": "POST", +"id": "beyondcorp.projects.locations.global.securityGateways.applications.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"applicationId": { +"description": "Optional. User-settable Application resource ID. * Must start with a letter. * Must contain between 4-63 characters from `/a-z-/`. * Must end with a number or letter.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The resource name of the parent SecurityGateway using the form: `projects/{project_id}/locations/global/securityGateways/{security_gateway_id}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/global/securityGateways/[^/]+$", +"required": true, +"type": "string" +}, +"requestId": { +"description": "Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore request if it has already been completed. 
The server will guarantee that for at least 60 minutes since the first request.", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+parent}/applications", +"request": { +"$ref": "GoogleCloudBeyondcorpSecuritygatewaysV1Application" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"patch": { +"description": "Updates the parameters of a single Application.", +"flatPath": "v1/projects/{projectsId}/locations/global/securityGateways/{securityGatewaysId}/applications/{applicationsId}", +"httpMethod": "PATCH", +"id": "beyondcorp.projects.locations.global.securityGateways.applications.patch", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Identifier. Name of the resource.", +"location": "path", +"pattern": "^projects/[^/]+/locations/global/securityGateways/[^/]+/applications/[^/]+$", +"required": true, +"type": "string" +}, +"requestId": { +"description": "Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request timed out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +}, +"updateMask": { +"description": "Required. 
Mutable fields include: display_name.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}", +"request": { +"$ref": "GoogleCloudBeyondcorpSecuritygatewaysV1Application" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +} +} +} +} +}, "operations": { "methods": { "cancel": { @@ -1744,91 +1833,416 @@ ] }, "delete": { -"description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", +"description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", +"httpMethod": "DELETE", +"id": "beyondcorp.projects.locations.operations.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be deleted.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "Empty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", +"httpMethod": "GET", +"id": "beyondcorp.projects.locations.operations.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", +"httpMethod": "GET", +"id": "beyondcorp.projects.locations.operations.list", +"parameterOrder": [ +"name" +], +"parameters": { +"filter": { +"description": "The standard list filter.", +"location": "query", +"type": "string" +}, +"name": { +"description": "The name of the operation's parent resource.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +}, +"pageSize": { +"description": "The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "The standard list page token.", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}/operations", +"response": { +"$ref": "GoogleLongrunningListOperationsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +}, +"securityGateways": { +"methods": { +"create": { +"description": "Creates a new SecurityGateway in a given project and location.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/securityGateways", +"httpMethod": "POST", +"id": 
"beyondcorp.projects.locations.securityGateways.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. The resource project name of the SecurityGateway location using the form: `projects/{project_id}/locations/{location_id}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +}, +"requestId": { +"description": "Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore request if it has already been completed. The server will guarantee that for at least 60 minutes since the first request.", +"location": "query", +"type": "string" +}, +"securityGatewayId": { +"description": "Optional. User-settable SecurityGateway resource ID. * Must start with a letter. * Must contain between 4-63 characters from `/a-z-/`. * Must end with a number or letter.", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+parent}/securityGateways", +"request": { +"$ref": "GoogleCloudBeyondcorpSecuritygatewaysV1SecurityGateway" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a single SecurityGateway.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/securityGateways/{securityGatewaysId}", +"httpMethod": "DELETE", +"id": "beyondcorp.projects.locations.securityGateways.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. BeyondCorp SecurityGateway name using the form: `projects/{project_id}/locations/{location_id}/securityGateways/{security_gateway_id}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/securityGateways/[^/]+$", +"required": true, +"type": "string" +}, +"requestId": { +"description": "Optional. An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +}, +"validateOnly": { +"description": "Optional. If set, validates request by executing a dry-run which would not alter the resource in any way.", +"location": "query", +"type": "boolean" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets details of a single SecurityGateway.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/securityGateways/{securityGatewaysId}", +"httpMethod": "GET", +"id": "beyondcorp.projects.locations.securityGateways.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. 
The resource name of the PartnerTenant using the form: `projects/{project_id}/locations/{location_id}/securityGateway/{security_gateway_id}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/securityGateways/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "GoogleCloudBeyondcorpSecuritygatewaysV1SecurityGateway" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists SecurityGateways in a given project and location.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/securityGateways", +"httpMethod": "GET", +"id": "beyondcorp.projects.locations.securityGateways.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"filter": { +"description": "Optional. A filter specifying constraints of a list operation. All fields in the SecurityGateway message are supported. For example, the following query will return the SecurityGateway with displayName \"test-security-gateway\" For more information, please refer to https://google.aip.dev/160.", +"location": "query", +"type": "string" +}, +"orderBy": { +"description": "Optional. Specifies the ordering of results. See [Sorting order](https://cloud.google.com/apis/design/design_patterns#sorting_order) for more information.", +"location": "query", +"type": "string" +}, +"pageSize": { +"description": "Optional. The maximum number of items to return. If not specified, a default value of 50 will be used by the service. Regardless of the page_size value, the response may include a partial list and a caller should only rely on response's next_page_token to determine if there are more instances left to be queried.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. The next_page_token value returned from a previous ListSecurityGatewayRequest, if any.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. 
The parent location to which the resources belong. `projects/{project_id}/locations/{location_id}/`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/securityGateways", +"response": { +"$ref": "GoogleCloudBeyondcorpSecuritygatewaysV1ListSecurityGatewaysResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"patch": { +"description": "Updates the parameters of a single SecurityGateway.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/securityGateways/{securityGatewaysId}", +"httpMethod": "PATCH", +"id": "beyondcorp.projects.locations.securityGateways.patch", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Identifier. Name of the resource.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/securityGateways/[^/]+$", +"required": true, +"type": "string" +}, +"requestId": { +"description": "Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request timed out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +}, +"updateMask": { +"description": "Required. 
Mutable fields include: display_name, hubs.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}", +"request": { +"$ref": "GoogleCloudBeyondcorpSecuritygatewaysV1SecurityGateway" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"setPeering": { +"description": "This is a custom method to allow customers to create a peering connections between Google network and customer networks. This is enabled only for the allowlisted customers.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/securityGateways/{securityGatewaysId}:setPeering", +"httpMethod": "POST", +"id": "beyondcorp.projects.locations.securityGateways.setPeering", +"parameterOrder": [ +"securityGateway" +], +"parameters": { +"securityGateway": { +"description": "Required. BeyondCorp SecurityGateway name using the form: `projects/{project}/locations/{location}/securityGateways/{security_gateway}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/securityGateways/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+securityGateway}:setPeering", +"request": { +"$ref": "GoogleCloudBeyondcorpSecuritygatewaysV1SetPeeringRequest" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +}, +"resources": { +"applications": { +"methods": { +"delete": { +"description": "Deletes a single Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/securityGateways/{securityGatewaysId}/applications/{applicationsId}", "httpMethod": "DELETE", -"id": "beyondcorp.projects.locations.operations.delete", +"id": "beyondcorp.projects.locations.securityGateways.applications.delete", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "The name of the operation resource to be deleted.", +"description": "Required. 
Name of the resource.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/securityGateways/[^/]+/applications/[^/]+$", "required": true, "type": "string" +}, +"requestId": { +"description": "Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +}, +"validateOnly": { +"description": "Optional. If set, validates request by executing a dry-run which would not alter the resource in any way.", +"location": "query", +"type": "boolean" } }, "path": "v1/{+name}", "response": { -"$ref": "Empty" +"$ref": "GoogleLongrunningOperation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "get": { -"description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", +"description": "Gets details of a single Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/securityGateways/{securityGatewaysId}/applications/{applicationsId}", "httpMethod": "GET", -"id": "beyondcorp.projects.locations.operations.get", +"id": "beyondcorp.projects.locations.securityGateways.applications.get", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "The name of the operation resource.", +"description": "Required. The resource name of the Application using the form: `projects/{project_id}/locations/global/securityGateway/{security_gateway_id}/applications/{application_id}`", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/securityGateways/[^/]+/applications/[^/]+$", "required": true, "type": "string" } }, "path": "v1/{+name}", "response": { -"$ref": "GoogleLongrunningOperation" +"$ref": "GoogleCloudBeyondcorpSecuritygatewaysV1Application" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "list": { -"description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", +"description": "Lists Applications in a given project and location.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/securityGateways/{securityGatewaysId}/applications", "httpMethod": "GET", -"id": "beyondcorp.projects.locations.operations.list", +"id": "beyondcorp.projects.locations.securityGateways.applications.list", "parameterOrder": [ -"name" +"parent" ], "parameters": { "filter": { -"description": "The standard list filter.", +"description": "Optional. 
A filter specifying constraints of a list operation. All fields in the Application message are supported. For example, the following query will return the Application with displayName \"test-application\" For more information, please refer to https://google.aip.dev/160.", "location": "query", "type": "string" }, -"name": { -"description": "The name of the operation's parent resource.", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+$", -"required": true, +"orderBy": { +"description": "Optional. Specifies the ordering of results. See [Sorting order](https://cloud.google.com/apis/design/design_patterns#sorting_order) for more information.", +"location": "query", "type": "string" }, "pageSize": { -"description": "The standard list page size.", +"description": "Optional. The maximum number of items to return. If not specified, a default value of 50 will be used by the service. Regardless of the page_size value, the response may include a partial list and a caller should only rely on response's next_page_token to determine if there are more instances left to be queried.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "The standard list page token.", +"description": "Optional. The next_page_token value returned from a previous ListApplicationsRequest, if any.", "location": "query", "type": "string" +}, +"parent": { +"description": "Required. The parent location to which the resources belong. 
`projects/{project_id}/locations/global/securityGateways/{security_gateway_id}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/securityGateways/[^/]+$", +"required": true, +"type": "string" } }, -"path": "v1/{+name}/operations", +"path": "v1/{+parent}/applications", "response": { -"$ref": "GoogleLongrunningListOperationsResponse" +"$ref": "GoogleCloudBeyondcorpSecuritygatewaysV1ListApplicationsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" @@ -1840,8 +2254,10 @@ } } } +} +} }, -"revision": "20240802", +"revision": "20240925", "rootUrl": "https://beyondcorp.googleapis.com/", "schemas": { "AllocatedConnection": { @@ -3072,6 +3488,284 @@ }, "type": "object" }, +"GoogleCloudBeyondcorpSecuritygatewaysV1Application": { +"description": "A Beyondcorp Application resource information.", +"id": "GoogleCloudBeyondcorpSecuritygatewaysV1Application", +"properties": { +"createTime": { +"description": "Output only. Timestamp when the resource was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"displayName": { +"description": "Optional. An arbitrary user-provided name for the Application resource. Cannot exceed 64 characters.", +"type": "string" +}, +"endpointMatchers": { +"description": "Required. Endpoint matchers associated with an application. A combination of hostname and ports as endpoint matcher is used to match the application. Match conditions for OR logic. An array of match conditions to allow for multiple matching criteria. The rule is considered a match if one the conditions are met. The conditions can be one of the following combination (Hostname), (Hostname & Ports) EXAMPLES: Hostname - (\"*.abc.com\"), (\"xyz.abc.com\") Hostname and Ports - (\"abc.com\" and \"22\"), (\"abc.com\" and \"22,33\") etc", +"items": { +"$ref": "GoogleCloudBeyondcorpSecuritygatewaysV1EndpointMatcher" +}, +"type": "array" +}, +"name": { +"description": "Identifier. 
Name of the resource.", +"type": "string" +}, +"updateTime": { +"description": "Output only. Timestamp when the resource was last modified.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudBeyondcorpSecuritygatewaysV1EndpointMatcher": { +"description": "EndpointMatcher contains the information of the endpoint that will match the application.", +"id": "GoogleCloudBeyondcorpSecuritygatewaysV1EndpointMatcher", +"properties": { +"hostname": { +"description": "Required. Hostname of the application.", +"type": "string" +}, +"ports": { +"description": "Optional. Ports of the application.", +"items": { +"format": "int32", +"type": "integer" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudBeyondcorpSecuritygatewaysV1Hub": { +"description": "The Hub message contains information pertaining to the regional data path deployments.", +"id": "GoogleCloudBeyondcorpSecuritygatewaysV1Hub", +"properties": { +"natGatewayConfig": { +"$ref": "GoogleCloudBeyondcorpSecuritygatewaysV1NatGatewayConfig", +"description": "Optional. NAT gateway setup to ensure enough NAT IP addresses are available to handle the traffic needed to access the applications. Allows to explicitly enable or disable the NAT in the Hub along with the total IPs allocated to handle the capacity limits." 
+} +}, +"type": "object" +}, +"GoogleCloudBeyondcorpSecuritygatewaysV1ListApplicationsResponse": { +"description": "Message for response to listing Applications.", +"id": "GoogleCloudBeyondcorpSecuritygatewaysV1ListApplicationsResponse", +"properties": { +"applications": { +"description": "A list of BeyondCorp Application in the project.", +"items": { +"$ref": "GoogleCloudBeyondcorpSecuritygatewaysV1Application" +}, +"type": "array" +}, +"nextPageToken": { +"description": "A token to retrieve the next page of results, or empty if there are no more results in the list.", +"type": "string" +}, +"unreachable": { +"description": "A list of locations that could not be reached.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudBeyondcorpSecuritygatewaysV1ListSecurityGatewaysResponse": { +"description": "Message for response to listing SecurityGateways.", +"id": "GoogleCloudBeyondcorpSecuritygatewaysV1ListSecurityGatewaysResponse", +"properties": { +"nextPageToken": { +"description": "A token to retrieve the next page of results, or empty if there are no more results in the list.", +"type": "string" +}, +"securityGateways": { +"description": "A list of BeyondCorp SecurityGateway in the project.", +"items": { +"$ref": "GoogleCloudBeyondcorpSecuritygatewaysV1SecurityGateway" +}, +"type": "array" +}, +"unreachable": { +"description": "A list of locations that could not be reached.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudBeyondcorpSecuritygatewaysV1NatGatewayConfig": { +"description": "Represents the NAT Gateway configuration.", +"id": "GoogleCloudBeyondcorpSecuritygatewaysV1NatGatewayConfig", +"properties": { +"natIps": { +"description": "Output only. 
List of NAT IPs that will be used for establishing connection to the endpoints.", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudBeyondcorpSecuritygatewaysV1Peering": { +"description": "VPC Peering details.", +"id": "GoogleCloudBeyondcorpSecuritygatewaysV1Peering", +"properties": { +"dnsZones": { +"description": "Optional. List of DNS zones for DNS peering with the customer VPC network.", +"items": { +"type": "string" +}, +"type": "array" +}, +"targetVpcNetwork": { +"description": "Required. The name of the Target VPC network in the format: `projects/{project}/global/networks/{network}`", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudBeyondcorpSecuritygatewaysV1SecurityGateway": { +"description": "Information about a BeyondCorp SecurityGateway resource.", +"id": "GoogleCloudBeyondcorpSecuritygatewaysV1SecurityGateway", +"properties": { +"createTime": { +"description": "Output only. Timestamp when the resource was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"displayName": { +"description": "Optional. An arbitrary user-provided name for the SecurityGateway. Cannot exceed 64 characters.", +"type": "string" +}, +"externalIps": { +"description": "Output only. IP addresses that will be used for establishing connection to the endpoints.", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +}, +"hubs": { +"additionalProperties": { +"$ref": "GoogleCloudBeyondcorpSecuritygatewaysV1Hub" +}, +"description": "Optional. Map of Hubs that represents regional data path deployment with GCP region as a key.", +"type": "object" +}, +"name": { +"description": "Identifier. Name of the resource.", +"type": "string" +}, +"state": { +"description": "Output only. 
The operational state of the SecurityGateway.", +"enum": [ +"STATE_UNSPECIFIED", +"CREATING", +"UPDATING", +"DELETING", +"RUNNING", +"DOWN", +"ERROR" +], +"enumDescriptions": [ +"Default value. This value is unused.", +"SecurityGateway is being created.", +"SecurityGateway is being updated.", +"SecurityGateway is being deleted.", +"SecurityGateway is running.", +"SecurityGateway is down and may be restored in the future. This happens when CCFE sends ProjectState = OFF.", +"SecurityGateway encountered an error and is in an indeterministic state." +], +"readOnly": true, +"type": "string" +}, +"updateTime": { +"description": "Output only. Timestamp when the resource was last modified.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudBeyondcorpSecuritygatewaysV1SecurityGatewayOperationMetadata": { +"description": "Represents the metadata of the long-running operation.", +"id": "GoogleCloudBeyondcorpSecuritygatewaysV1SecurityGatewayOperationMetadata", +"properties": { +"apiVersion": { +"description": "Output only. API version used to start the operation.", +"readOnly": true, +"type": "string" +}, +"createTime": { +"description": "Output only. The time the operation was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"endTime": { +"description": "Output only. The time the operation finished running.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"requestedCancellation": { +"description": "Output only. Identifies whether the user has requested cancellation of the operation. Operations that have been cancelled successfully have Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", +"readOnly": true, +"type": "boolean" +}, +"statusMessage": { +"description": "Output only. Human-readable status of the operation, if any.", +"readOnly": true, +"type": "string" +}, +"target": { +"description": "Output only. 
Server-defined resource path for the target of the operation.", +"readOnly": true, +"type": "string" +}, +"verb": { +"description": "Output only. Name of the verb executed by the operation.", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudBeyondcorpSecuritygatewaysV1SetPeeringRequest": { +"description": "Set Peering request for creating a VPC peering between Google network and customer networks.", +"id": "GoogleCloudBeyondcorpSecuritygatewaysV1SetPeeringRequest", +"properties": { +"requestId": { +"description": "Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes since the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", +"type": "string" +}, +"validateOnly": { +"description": "Optional. If set, validates request by executing a dry-run which would not alter the resource in any way.", +"type": "boolean" +}, +"vpcPeerings": { +"description": "Required. 
List of Peering connection information.", +"items": { +"$ref": "GoogleCloudBeyondcorpSecuritygatewaysV1Peering" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleCloudBeyondcorpSecuritygatewaysV1alphaSecurityGatewayOperationMetadata": { "description": "Represents the metadata of the long-running operation.", "id": "GoogleCloudBeyondcorpSecuritygatewaysV1alphaSecurityGatewayOperationMetadata", diff --git a/googleapiclient/discovery_cache/documents/beyondcorp.v1alpha.json b/googleapiclient/discovery_cache/documents/beyondcorp.v1alpha.json index 700a61c330a..4701a82a5f6 100644 --- a/googleapiclient/discovery_cache/documents/beyondcorp.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/beyondcorp.v1alpha.json @@ -3807,97 +3807,6 @@ } } }, -"netConnections": { -"methods": { -"getIamPolicy": { -"description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", -"flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/netConnections/{netConnectionsId}:getIamPolicy", -"httpMethod": "GET", -"id": "beyondcorp.projects.locations.netConnections.getIamPolicy", -"parameterOrder": [ -"resource" -], -"parameters": { -"options.requestedPolicyVersion": { -"description": "Optional. The maximum policy version that will be used to format the policy. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset. The policy in the response might use the policy version that you specified, or it might use a lower policy version. For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1. 
To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).", -"format": "int32", -"location": "query", -"type": "integer" -}, -"resource": { -"description": "REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/netConnections/[^/]+$", -"required": true, -"type": "string" -} -}, -"path": "v1alpha/{+resource}:getIamPolicy", -"response": { -"$ref": "GoogleIamV1Policy" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, -"setIamPolicy": { -"description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.", -"flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/netConnections/{netConnectionsId}:setIamPolicy", -"httpMethod": "POST", -"id": "beyondcorp.projects.locations.netConnections.setIamPolicy", -"parameterOrder": [ -"resource" -], -"parameters": { -"resource": { -"description": "REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/netConnections/[^/]+$", -"required": true, -"type": "string" -} -}, -"path": "v1alpha/{+resource}:setIamPolicy", -"request": { -"$ref": "GoogleIamV1SetIamPolicyRequest" -}, -"response": { -"$ref": "GoogleIamV1Policy" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, -"testIamPermissions": { -"description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. 
Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", -"flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/netConnections/{netConnectionsId}:testIamPermissions", -"httpMethod": "POST", -"id": "beyondcorp.projects.locations.netConnections.testIamPermissions", -"parameterOrder": [ -"resource" -], -"parameters": { -"resource": { -"description": "REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/netConnections/[^/]+$", -"required": true, -"type": "string" -} -}, -"path": "v1alpha/{+resource}:testIamPermissions", -"request": { -"$ref": "GoogleIamV1TestIamPermissionsRequest" -}, -"response": { -"$ref": "GoogleIamV1TestIamPermissionsResponse" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -} -} -}, "operations": { "methods": { "cancel": { @@ -4499,7 +4408,7 @@ } } }, -"revision": "20240918", +"revision": "20240925", "rootUrl": "https://beyondcorp.googleapis.com/", "schemas": { "AllocatedConnection": { @@ -6673,6 +6582,50 @@ }, "type": "object" }, +"GoogleCloudBeyondcorpSecuritygatewaysV1SecurityGatewayOperationMetadata": { +"description": "Represents the metadata of the long-running operation.", +"id": "GoogleCloudBeyondcorpSecuritygatewaysV1SecurityGatewayOperationMetadata", +"properties": { +"apiVersion": { +"description": "Output only. API version used to start the operation.", +"readOnly": true, +"type": "string" +}, +"createTime": { +"description": "Output only. The time the operation was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"endTime": { +"description": "Output only. 
The time the operation finished running.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"requestedCancellation": { +"description": "Output only. Identifies whether the user has requested cancellation of the operation. Operations that have been cancelled successfully have Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.", +"readOnly": true, +"type": "boolean" +}, +"statusMessage": { +"description": "Output only. Human-readable status of the operation, if any.", +"readOnly": true, +"type": "string" +}, +"target": { +"description": "Output only. Server-defined resource path for the target of the operation.", +"readOnly": true, +"type": "string" +}, +"verb": { +"description": "Output only. Name of the verb executed by the operation.", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudBeyondcorpSecuritygatewaysV1alphaApplication": { "description": "A Beyondcorp Application resource information.", "id": "GoogleCloudBeyondcorpSecuritygatewaysV1alphaApplication", diff --git a/googleapiclient/discovery_cache/documents/bigquery.v2.json b/googleapiclient/discovery_cache/documents/bigquery.v2.json index 8aab8db39aa..a2120c872e3 100644 --- a/googleapiclient/discovery_cache/documents/bigquery.v2.json +++ b/googleapiclient/discovery_cache/documents/bigquery.v2.json @@ -56,18 +56,63 @@ }, { "description": "Regional Endpoint", +"endpointUrl": "https://bigquery.us-central1.rep.googleapis.com/", +"location": "us-central1" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://bigquery.us-central2.rep.googleapis.com/", +"location": "us-central2" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://bigquery.us-east1.rep.googleapis.com/", +"location": "us-east1" +}, +{ +"description": "Regional Endpoint", "endpointUrl": "https://bigquery.us-east4.rep.googleapis.com/", "location": "us-east4" }, { "description": "Regional Endpoint", +"endpointUrl": 
"https://bigquery.us-east5.rep.googleapis.com/", +"location": "us-east5" +}, +{ +"description": "Regional Endpoint", "endpointUrl": "https://bigquery.us-east7.rep.googleapis.com/", "location": "us-east7" }, { "description": "Regional Endpoint", +"endpointUrl": "https://bigquery.us-south1.rep.googleapis.com/", +"location": "us-south1" +}, +{ +"description": "Regional Endpoint", "endpointUrl": "https://bigquery.us-west1.rep.googleapis.com/", "location": "us-west1" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://bigquery.us-west2.rep.googleapis.com/", +"location": "us-west2" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://bigquery.us-west3.rep.googleapis.com/", +"location": "us-west3" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://bigquery.us-west4.rep.googleapis.com/", +"location": "us-west4" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://bigquery.us-west8.rep.googleapis.com/", +"location": "us-west8" } ], "fullyEncodeReservedExpansion": true, @@ -1940,7 +1985,7 @@ } } }, -"revision": "20240905", +"revision": "20240919", "rootUrl": "https://bigquery.googleapis.com/", "schemas": { "AggregateClassificationMetrics": { @@ -6424,7 +6469,8 @@ "RANDOM_FOREST_CLASSIFIER", "TENSORFLOW_LITE", "ONNX", -"TRANSFORM_ONLY" +"TRANSFORM_ONLY", +"CONTRIBUTION_ANALYSIS" ], "enumDescriptions": [ "Default value.", @@ -6451,7 +6497,8 @@ "Random forest classifier model.", "An imported TensorFlow Lite model.", "An imported ONNX model.", -"Model to capture the columns and logic in the TRANSFORM clause along with statistics useful for ML analytic functions." +"Model to capture the columns and logic in the TRANSFORM clause along with statistics useful for ML analytic functions.", +"The contribution analysis model." 
], "readOnly": true, "type": "string" @@ -6595,7 +6642,8 @@ "RANDOM_FOREST_CLASSIFIER", "TENSORFLOW_LITE", "ONNX", -"TRANSFORM_ONLY" +"TRANSFORM_ONLY", +"CONTRIBUTION_ANALYSIS" ], "enumDescriptions": [ "Default value.", @@ -6622,7 +6670,8 @@ "Random forest classifier model.", "An imported TensorFlow Lite model.", "An imported ONNX model.", -"Model to capture the columns and logic in the TRANSFORM clause along with statistics useful for ML analytic functions." +"Model to capture the columns and logic in the TRANSFORM clause along with statistics useful for ML analytic functions.", +"The contribution analysis model." ], "readOnly": true, "type": "string" @@ -9329,6 +9378,10 @@ "format": "double", "type": "number" }, +"contributionMetric": { +"description": "The contribution metric. Applies to contribution analysis models. Allowed formats supported are for summable and summable ratio contribution metrics. These include expressions such as \"SUM(x)\" or \"SUM(x)/SUM(y)\", where x and y are column names from the base table.", +"type": "string" +}, "dartNormalizeType": { "description": "Type of normalization algorithm for boosted tree models using dart booster.", "enum": [ @@ -9402,6 +9455,13 @@ "description": "If true, perform decompose time series and save the results.", "type": "boolean" }, +"dimensionIdColumns": { +"description": "Optional. Names of the columns to slice on. Applies to contribution analysis models.", +"items": { +"type": "string" +}, +"type": "array" +}, "distanceType": { "description": "Distance type for clustering models.", "enum": [ @@ -9825,6 +9885,10 @@ "format": "int64", "type": "string" }, +"isTestColumn": { +"description": "Name of the column used to determine the rows corresponding to control and test. 
Applies to contribution analysis models.", +"type": "string" +}, "itemColumn": { "description": "Item column specified for matrix factorization models.", "type": "string" @@ -9925,6 +9989,11 @@ "format": "int64", "type": "string" }, +"minAprioriSupport": { +"description": "The apriori support minimum. Applies to contribution analysis models.", +"format": "double", +"type": "number" +}, "minRelativeProgress": { "description": "When early_stop is true, stops training when accuracy improvement is less than 'min_relative_progress'. Used only for iterative training algorithms.", "format": "double", diff --git a/googleapiclient/discovery_cache/documents/businessprofileperformance.v1.json b/googleapiclient/discovery_cache/documents/businessprofileperformance.v1.json index d29eb233025..4f199862343 100644 --- a/googleapiclient/discovery_cache/documents/businessprofileperformance.v1.json +++ b/googleapiclient/discovery_cache/documents/businessprofileperformance.v1.json @@ -294,25 +294,25 @@ "type": "string" }, "dailySubEntityType.timeOfDay.hours": { -"description": "Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", +"description": "Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", "format": "int32", "location": "query", "type": "integer" }, "dailySubEntityType.timeOfDay.minutes": { -"description": "Minutes of hour of day. Must be from 0 to 59.", +"description": "Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59.", "format": "int32", "location": "query", "type": "integer" }, "dailySubEntityType.timeOfDay.nanos": { -"description": "Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.", +"description": "Fractions of seconds, in nanoseconds. 
Must be greater than or equal to 0 and less than or equal to 999,999,999.", "format": "int32", "location": "query", "type": "integer" }, "dailySubEntityType.timeOfDay.seconds": { -"description": "Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.", +"description": "Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds.", "format": "int32", "location": "query", "type": "integer" @@ -417,7 +417,7 @@ } } }, -"revision": "20240707", +"revision": "20241002", "rootUrl": "https://businessprofileperformance.googleapis.com/", "schemas": { "DailyMetricTimeSeries": { @@ -635,22 +635,22 @@ "id": "TimeOfDay", "properties": { "hours": { -"description": "Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", +"description": "Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", "format": "int32", "type": "integer" }, "minutes": { -"description": "Minutes of hour of day. Must be from 0 to 59.", +"description": "Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59.", "format": "int32", "type": "integer" }, "nanos": { -"description": "Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.", +"description": "Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999.", "format": "int32", "type": "integer" }, "seconds": { -"description": "Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.", +"description": "Seconds of a minute. 
Must be greater than or equal to 0 and typically must be less than or equal to 59. An API may allow the value 60 if it allows leap-seconds.", "format": "int32", "type": "integer" } diff --git a/googleapiclient/discovery_cache/documents/calendar.v3.json b/googleapiclient/discovery_cache/documents/calendar.v3.json index 92838bc4993..760c597935b 100644 --- a/googleapiclient/discovery_cache/documents/calendar.v3.json +++ b/googleapiclient/discovery_cache/documents/calendar.v3.json @@ -1767,7 +1767,7 @@ } } }, -"revision": "20240906", +"revision": "20240927", "rootUrl": "https://www.googleapis.com/", "schemas": { "Acl": { @@ -2737,7 +2737,7 @@ "type": "boolean" }, "responseStatus": { -"description": "The attendee's response status. Possible values are: \n- \"needsAction\" - The attendee has not responded to the invitation (recommended for new events). \n- \"declined\" - The attendee has declined the invitation. \n- \"tentative\" - The attendee has tentatively accepted the invitation. \n- \"accepted\" - The attendee has accepted the invitation. Warning: If you add an event using the values declined, tentative, or accepted, attendees with the \"Add invitations to my calendar\" setting set to \"When I respond to invitation in email\" won't see an event on their calendar unless they choose to change their invitation response in the event invitation email.", +"description": "The attendee's response status. Possible values are: \n- \"needsAction\" - The attendee has not responded to the invitation (recommended for new events). \n- \"declined\" - The attendee has declined the invitation. \n- \"tentative\" - The attendee has tentatively accepted the invitation. \n- \"accepted\" - The attendee has accepted the invitation. 
Warning: If you add an event using the values declined, tentative, or accepted, attendees with the \"Add invitations to my calendar\" setting set to \"When I respond to invitation in email\" or \"Only if the sender is known\" might have their response reset to needsAction and won't see an event in their calendar unless they change their response in the event invitation email. Furthermore, if more than 200 guests are invited to the event, response status is not propagated to the guests.", "type": "string" }, "self": { diff --git a/googleapiclient/discovery_cache/documents/chat.v1.json b/googleapiclient/discovery_cache/documents/chat.v1.json index 02cbc33e750..a5406d8a3ac 100644 --- a/googleapiclient/discovery_cache/documents/chat.v1.json +++ b/googleapiclient/discovery_cache/documents/chat.v1.json @@ -290,7 +290,7 @@ ] }, "create": { -"description": "Creates a space with no members. Can be used to create a named space. Spaces grouped by topics aren't supported. For an example, see [Create a space](https://developers.google.com/workspace/chat/create-spaces). If you receive the error message `ALREADY_EXISTS` when creating a space, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. If you're a member of the [Developer Preview program](https://developers.google.com/workspace/preview), you can create a group chat in import mode using `spaceType.GROUP_CHAT`. Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).", +"description": "Creates a space with no members. Can be used to create a named space. Spaces grouped by topics aren't supported. For an example, see [Create a space](https://developers.google.com/workspace/chat/create-spaces). If you receive the error message `ALREADY_EXISTS` when creating a space, try a different `displayName`. An existing space within the Google Workspace organization might already use this display name. 
If you're a member of the [Developer Preview program](https://developers.google.com/workspace/preview), you can create a group chat in import mode using `spaceType.GROUP_CHAT`. Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) When authenticating as an app, the `space.customer` field must be set in the request.", "flatPath": "v1/spaces", "httpMethod": "POST", "id": "chat.spaces.create", @@ -318,7 +318,7 @@ ] }, "delete": { -"description": "Deletes a named space. Always performs a cascading delete, which means that the space's child resources\u2014like messages posted in the space and memberships in the space\u2014are also deleted. For an example, see [Delete a space](https://developers.google.com/workspace/chat/delete-spaces). Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) from a user who has permission to delete the space.", +"description": "Deletes a named space. Always performs a cascading delete, which means that the space's child resources\u2014like messages posted in the space and memberships in the space\u2014are also deleted. For an example, see [Delete a space](https://developers.google.com/workspace/chat/delete-spaces). 
Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)", "flatPath": "v1/spaces/{spacesId}", "httpMethod": "DELETE", "id": "chat.spaces.delete", @@ -351,7 +351,7 @@ ] }, "findDirectMessage": { -"description": "Returns the existing direct message with the specified user. If no direct message space is found, returns a `404 NOT_FOUND` error. For an example, see [Find a direct message](/chat/api/guides/v1/spaces/find-direct-message). With [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), returns the direct message space between the specified user and the authenticated user. With [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app), returns the direct message space between the specified user and the calling Chat app. Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) or [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app).", +"description": "Returns the existing direct message with the specified user. If no direct message space is found, returns a `404 NOT_FOUND` error. For an example, see [Find a direct message](/chat/api/guides/v1/spaces/find-direct-message). With [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app), returns the direct message space between the specified user and the calling Chat app. 
With [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), returns the direct message space between the specified user and the authenticated user. // Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)", "flatPath": "v1/spaces:findDirectMessage", "httpMethod": "GET", "id": "chat.spaces.findDirectMessage", @@ -374,7 +374,7 @@ ] }, "get": { -"description": "Returns details about a space. For an example, see [Get details about a space](https://developers.google.com/workspace/chat/get-spaces). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).", +"description": "Returns details about a space. For an example, see [Get details about a space](https://developers.google.com/workspace/chat/get-spaces). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)", "flatPath": "v1/spaces/{spacesId}", "httpMethod": "GET", "id": "chat.spaces.get", @@ -409,7 +409,7 @@ ] }, "list": { -"description": "Lists spaces the caller is a member of. Group chats and DMs aren't listed until the first message is sent. For an example, see [List spaces](https://developers.google.com/workspace/chat/list-spaces). 
Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). Lists spaces visible to the caller or authenticated user. Group chats and DMs aren't listed until the first message is sent. To list all named spaces by Google Workspace organization, use the [`spaces.search()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/search) method using Workspace administrator privileges instead.", +"description": "Lists spaces the caller is a member of. Group chats and DMs aren't listed until the first message is sent. For an example, see [List spaces](https://developers.google.com/workspace/chat/list-spaces). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) Lists spaces visible to the caller or authenticated user. Group chats and DMs aren't listed until the first message is sent. To list all named spaces by Google Workspace organization, use the [`spaces.search()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/search) method using Workspace administrator privileges instead.", "flatPath": "v1/spaces", "httpMethod": "GET", "id": "chat.spaces.list", @@ -443,7 +443,7 @@ ] }, "patch": { -"description": "Updates a space. For an example, see [Update a space](https://developers.google.com/workspace/chat/update-spaces). If you're updating the `displayName` field and receive the error message `ALREADY_EXISTS`, try a different display name.. 
An existing space within the Google Workspace organization might already use this display name. Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).", +"description": "Updates a space. For an example, see [Update a space](https://developers.google.com/workspace/chat/update-spaces). If you're updating the `displayName` field and receive the error message `ALREADY_EXISTS`, try a different display name.. An existing space within the Google Workspace organization might already use this display name. Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)", "flatPath": "v1/spaces/{spacesId}", "httpMethod": "PATCH", "id": "chat.spaces.patch", @@ -459,7 +459,7 @@ "type": "string" }, "updateMask": { -"description": "Required. The updated field paths, comma separated if there are multiple. You can update the following fields for a space: - `space_details` - `display_name`: Only supports updating the display name for spaces where `spaceType` field is `SPACE`. If you receive the error message `ALREADY_EXISTS`, try a different value. An existing space within the Google Workspace organization might already use this display name. - `space_type`: Only supports changing a `GROUP_CHAT` space type to `SPACE`. Include `display_name` together with `space_type` in the update mask and ensure that the specified space has a non-empty display name and the `SPACE` space type. 
Including the `space_type` mask and the `SPACE` type in the specified space when updating the display name is optional if the existing space already has the `SPACE` type. Trying to update the space type in other ways results in an invalid argument error. `space_type` is not supported with admin access. - `space_history_state`: Updates [space history settings](https://support.google.com/chat/answer/7664687) by turning history on or off for the space. Only supported if history settings are enabled for the Google Workspace organization. To update the space history state, you must omit all other field masks in your request. `space_history_state` is not supported with admin access. - `access_settings.audience`: Updates the [access setting](https://support.google.com/chat/answer/11971020) of who can discover the space, join the space, and preview the messages in named space where `spaceType` field is `SPACE`. If the existing space has a target audience, you can remove the audience and restrict space access by omitting a value for this field mask. To update access settings for a space, the authenticating user must be a space manager and omit all other field masks in your request. You can't update this field if the space is in [import mode](https://developers.google.com/workspace/chat/import-data-overview). To learn more, see [Make a space discoverable to specific users](https://developers.google.com/workspace/chat/space-target-audience). `access_settings.audience` is not supported with admin access. 
- Developer Preview: Supports changing the [permission settings](https://support.google.com/chat/answer/13340792) of a space, supported field paths include: `permission_settings.manage_members_and_groups`, `permission_settings.modify_space_details`, `permission_settings.toggle_history`, `permission_settings.use_at_mention_all`, `permission_settings.manage_apps`, `permission_settings.manage_webhooks`, `permission_settings.reply_messages` (Warning: mutually exclusive with all other non-permission settings field paths). `permission_settings` is not supported with admin access.", +"description": "- Supports changing the [permission settings](https://support.google.com/chat/answer/13340792) of a space, supported field paths include: `permission_settings.manage_members_and_groups`, `permission_settings.modify_space_details`, `permission_settings.toggle_history`, `permission_settings.use_at_mention_all`, `permission_settings.manage_apps`, `permission_settings.manage_webhooks`, `permission_settings.reply_messages` (Warning: mutually exclusive with all other non-permission settings field paths). `permission_settings` is not supported with admin access.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -551,7 +551,7 @@ "members": { "methods": { "create": { -"description": "Creates a membership for the calling Chat app, a user, or a Google Group. Creating memberships for other Chat apps isn't supported. When creating a membership, if the specified member has their auto-accept policy turned off, then they're invited, and must accept the space invitation before joining. Otherwise, creating a membership adds the member directly to the specified space. Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). For example usage, see: - [Invite or add a user to a space](https://developers.google.com/workspace/chat/create-members#create-user-membership). 
- [Invite or add a Google Group to a space](https://developers.google.com/workspace/chat/create-members#create-group-membership). - [Add the Chat app to a space](https://developers.google.com/workspace/chat/create-members#create-membership-calling-api).", +"description": "Creates a membership for the calling Chat app, a user, or a Google Group. Creating memberships for other Chat apps isn't supported. When creating a membership, if the specified member has their auto-accept policy turned off, then they're invited, and must accept the space invitation before joining. Otherwise, creating a membership adds the member directly to the specified space. Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) For example usage, see: - [Invite or add a user to a space](https://developers.google.com/workspace/chat/create-members#create-user-membership). - [Invite or add a Google Group to a space](https://developers.google.com/workspace/chat/create-members#create-group-membership). - [Add the Chat app to a space](https://developers.google.com/workspace/chat/create-members#create-membership-calling-api).", "flatPath": "v1/spaces/{spacesId}/members", "httpMethod": "POST", "id": "chat.spaces.members.create", @@ -588,7 +588,7 @@ ] }, "delete": { -"description": "Deletes a membership. For an example, see [Remove a user or a Google Chat app from a space](https://developers.google.com/workspace/chat/delete-members). Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).", +"description": "Deletes a membership. 
For an example, see [Remove a user or a Google Chat app from a space](https://developers.google.com/workspace/chat/delete-members). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)", "flatPath": "v1/spaces/{spacesId}/members/{membersId}", "httpMethod": "DELETE", "id": "chat.spaces.members.delete", @@ -622,7 +622,7 @@ ] }, "get": { -"description": "Returns details about a membership. For an example, see [Get details about a user's or Google Chat app's membership](https://developers.google.com/workspace/chat/get-members). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).", +"description": "Returns details about a membership. For an example, see [Get details about a user's or Google Chat app's membership](https://developers.google.com/workspace/chat/get-members). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)", "flatPath": "v1/spaces/{spacesId}/members/{membersId}", "httpMethod": "GET", "id": "chat.spaces.members.get", @@ -631,7 +631,7 @@ ], "parameters": { "name": { -"description": "Required. 
Resource name of the membership to retrieve. To get the app's own membership [by using user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), you can optionally use `spaces/{space}/members/app`. Format: `spaces/{space}/members/{member}` or `spaces/{space}/members/app` When [authenticated as a user](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), you can use the user's email as an alias for `{member}`. For example, `spaces/{space}/members/example@gmail.com` where `example@gmail.com` is the email of the Google Chat user.", +"description": "Required. Resource name of the membership to retrieve. To get the app's own membership [by using user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user), you can optionally use `spaces/{space}/members/app`. Format: `spaces/{space}/members/{member}` or `spaces/{space}/members/app` You can use the user's email as an alias for `{member}`. For example, `spaces/{space}/members/example@gmail.com` where `example@gmail.com` is the email of the Google Chat user.", "location": "path", "pattern": "^spaces/[^/]+/members/[^/]+$", "required": true, @@ -656,7 +656,7 @@ ] }, "list": { -"description": "Lists memberships in a space. For an example, see [List users and Google Chat apps in a space](https://developers.google.com/workspace/chat/list-members). Listing memberships with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) lists memberships in spaces that the Chat app has access to, but excludes Chat app memberships, including its own. Listing memberships with [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) lists memberships in spaces that the authenticated user has access to. Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). 
Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).", +"description": "Lists memberships in a space. For an example, see [List users and Google Chat apps in a space](https://developers.google.com/workspace/chat/list-members). Listing memberships with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) lists memberships in spaces that the Chat app has access to, but excludes Chat app memberships, including its own. Listing memberships with [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) lists memberships in spaces that the authenticated user has access to. Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)", "flatPath": "v1/spaces/{spacesId}/members", "httpMethod": "GET", "id": "chat.spaces.members.list", @@ -717,7 +717,7 @@ ] }, "patch": { -"description": "Updates a membership. For an example, see [Update a user's membership in a space](https://developers.google.com/workspace/chat/update-members). Requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).", +"description": "Updates a membership. For an example, see [Update a user's membership in a space](https://developers.google.com/workspace/chat/update-members). 
Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) in [Developer Preview](https://developers.google.com/workspace/preview) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)", "flatPath": "v1/spaces/{spacesId}/members/{membersId}", "httpMethod": "PATCH", "id": "chat.spaces.members.patch", @@ -825,7 +825,7 @@ ] }, "delete": { -"description": "Deletes a message. For an example, see [Delete a message](https://developers.google.com/workspace/chat/delete-messages). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). When using app authentication, requests can only delete messages created by the calling Chat app.", +"description": "Deletes a message. For an example, see [Delete a message](https://developers.google.com/workspace/chat/delete-messages). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) When using app authentication, requests can only delete messages created by the calling Chat app.", "flatPath": "v1/spaces/{spacesId}/messages/{messagesId}", "httpMethod": "DELETE", "id": "chat.spaces.messages.delete", @@ -857,7 +857,7 @@ ] }, "get": { -"description": "Returns details about a message. 
For an example, see [Get details about a message](https://developers.google.com/workspace/chat/get-messages). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). Note: Might return a message from a blocked member or space.", +"description": "Returns details about a message. For an example, see [Get details about a message](https://developers.google.com/workspace/chat/get-messages). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) Note: Might return a message from a blocked member or space.", "flatPath": "v1/spaces/{spacesId}/messages/{messagesId}", "httpMethod": "GET", "id": "chat.spaces.messages.get", @@ -937,7 +937,7 @@ ] }, "patch": { -"description": "Updates a message. There's a difference between the `patch` and `update` methods. The `patch` method uses a `patch` request while the `update` method uses a `put` request. We recommend using the `patch` method. For an example, see [Update a message](https://developers.google.com/workspace/chat/update-messages). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). When using app authentication, requests can only update messages created by the calling Chat app.", +"description": "Updates a message. 
There's a difference between the `patch` and `update` methods. The `patch` method uses a `patch` request while the `update` method uses a `put` request. We recommend using the `patch` method. For an example, see [Update a message](https://developers.google.com/workspace/chat/update-messages). Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) When using app authentication, requests can only update messages created by the calling Chat app.", "flatPath": "v1/spaces/{spacesId}/messages/{messagesId}", "httpMethod": "PATCH", "id": "chat.spaces.messages.patch", @@ -978,7 +978,7 @@ ] }, "update": { -"description": "Updates a message. There's a difference between the `patch` and `update` methods. The `patch` method uses a `patch` request while the `update` method uses a `put` request. We recommend using the `patch` method. For an example, see [Update a message](https://developers.google.com/workspace/chat/update-messages). Requires [authentication](https://developers.google.com/workspace/chat/authenticate-authorize). Supports [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) and [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user). When using app authentication, requests can only update messages created by the calling Chat app.", +"description": "Updates a message. There's a difference between the `patch` and `update` methods. The `patch` method uses a `patch` request while the `update` method uses a `put` request. We recommend using the `patch` method. For an example, see [Update a message](https://developers.google.com/workspace/chat/update-messages). 
Supports the following types of [authentication](https://developers.google.com/workspace/chat/authenticate-authorize): - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) When using app authentication, requests can only update messages created by the calling Chat app.", "flatPath": "v1/spaces/{spacesId}/messages/{messagesId}", "httpMethod": "PUT", "id": "chat.spaces.messages.update", @@ -1344,7 +1344,7 @@ } } }, -"revision": "20240919", +"revision": "20241001", "rootUrl": "https://chat.googleapis.com/", "schemas": { "AccessSettings": { @@ -1361,13 +1361,13 @@ "enumDescriptions": [ "Access state is unknown or not supported in this API.", "Only users or Google Groups that have been individually added or invited by other users or Google Workspace administrators can discover and access the space.", -"A space manager has granted a target audience access to the space. Users or Google Groups that have been individually added or invited to the space can also discover and access the space. To learn more, see [Make a space discoverable to specific users](https://developers.google.com/workspace/chat/space-target-audience)." +"A space manager has granted a target audience access to the space. Users or Google Groups that have been individually added or invited to the space can also discover and access the space. To learn more, see [Make a space discoverable to specific users](https://developers.google.com/workspace/chat/space-target-audience). Creating discoverable spaces requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)." ], "readOnly": true, "type": "string" }, "audience": { -"description": "Optional. 
The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app).", +"description": "Optional. The resource name of the [target audience](https://support.google.com/a/answer/9934697) who can discover the space, join the space, and preview the messages in the space. If unset, only users or Google Groups who have been individually invited or added to the space can access it. For details, see [Make a space discoverable to a target audience](https://developers.google.com/workspace/chat/space-target-audience). Format: `audiences/{audience}` To use the default target audience for the Google Workspace organization, set to `audiences/default`. Reading the target audience supports: - [User authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user) - [App authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app) with [administrator approval](https://support.google.com/a?p=chat-app-auth) with the `chat.app.spaces` scope in [Developer Preview](https://developers.google.com/workspace/preview). This field is not populated when using the `chat.bot` scope with [app authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-app). 
Setting the target audience requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).", "type": "string" } }, @@ -3607,7 +3607,7 @@ }, "groupMember": { "$ref": "Group", -"description": "The Google Group the membership corresponds to." +"description": "The Google Group the membership corresponds to. Reading or mutating memberships for Google Groups requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user)." }, "member": { "$ref": "User", @@ -3993,6 +3993,61 @@ }, "type": "object" }, +"PermissionSetting": { +"description": "Represents a space permission setting.", +"id": "PermissionSetting", +"properties": { +"managersAllowed": { +"description": "Whether spaces managers have this permission.", +"type": "boolean" +}, +"membersAllowed": { +"description": "Whether non-manager members have this permission.", +"type": "boolean" +} +}, +"type": "object" +}, +"PermissionSettings": { +"description": "[Permission settings](https://support.google.com/chat/answer/13340792) for a named space. To set permission settings when creating a space, specify the `PredefinedPermissionSettings` field in your request.", +"id": "PermissionSettings", +"properties": { +"manageApps": { +"$ref": "PermissionSetting", +"description": "Setting for managing apps in a space." +}, +"manageMembersAndGroups": { +"$ref": "PermissionSetting", +"description": "Setting for managing members and groups in a space." +}, +"manageWebhooks": { +"$ref": "PermissionSetting", +"description": "Setting for managing webhooks in a space." +}, +"modifySpaceDetails": { +"$ref": "PermissionSetting", +"description": "Setting for updating space name, avatar, description and guidelines." +}, +"postMessages": { +"$ref": "PermissionSetting", +"description": "Output only. 
Setting for posting messages in a space.", +"readOnly": true +}, +"replyMessages": { +"$ref": "PermissionSetting", +"description": "Setting for replying to messages in a space." +}, +"toggleHistory": { +"$ref": "PermissionSetting", +"description": "Setting for toggling space history on and off." +}, +"useAtMentionAll": { +"$ref": "PermissionSetting", +"description": "Setting for using @all in a space." +} +}, +"type": "object" +}, "QuotedMessageMetadata": { "description": "Information about a quoted message.", "id": "QuotedMessageMetadata", @@ -4268,7 +4323,7 @@ "type": "boolean" }, "importMode": { -"description": "Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete.", +"description": "Optional. Whether this space is created in `Import Mode` as part of a data migration into Google Workspace. While spaces are being imported, they aren't visible to users until the import is complete. Creating a space in `Import Mode`requires [user authentication](https://developers.google.com/workspace/chat/authenticate-authorize-chat-user).", "type": "boolean" }, "lastActiveTime": { @@ -4286,6 +4341,24 @@ "description": "Resource name of the space. Format: `spaces/{space}` Where `{space}` represents the system-assigned ID for the space. You can obtain the space ID by calling the [`spaces.list()`](https://developers.google.com/workspace/chat/api/reference/rest/v1/spaces/list) method or from the space URL. For example, if the space URL is `https://mail.google.com/mail/u/0/#chat/space/AAAAAAAAA`, the space ID is `AAAAAAAAA`.", "type": "string" }, +"permissionSettings": { +"$ref": "PermissionSettings", +"description": "Optional. Exact permission settings which can be set to update the space. Input for updating a space. Otherwise, output only. For space creation, use `predefined_permission_settings` instead." 
+}, +"predefinedPermissionSettings": { +"description": "Optional. Input only. Space permission settings. Input for creating a space, a collaboration space is created if this field is not set. After you create the space, settings are populated in the `PermissionSettings` field.", +"enum": [ +"PREDEFINED_PERMISSION_SETTINGS_UNSPECIFIED", +"COLLABORATION_SPACE", +"ANNOUNCEMENT_SPACE" +], +"enumDescriptions": [ +"Unspecified. Don't use.", +"Setting to make the space a collaboration space where all members can post messages.", +"Setting to make the space an announcement space where only space managers can post messages." +], +"type": "string" +}, "singleUserBotDm": { "description": "Optional. Whether the space is a DM between a Chat app and a single human.", "type": "boolean" diff --git a/googleapiclient/discovery_cache/documents/chromemanagement.v1.json b/googleapiclient/discovery_cache/documents/chromemanagement.v1.json index f3646a41c1c..d75f655df43 100644 --- a/googleapiclient/discovery_cache/documents/chromemanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/chromemanagement.v1.json @@ -9,7 +9,7 @@ "description": "See reports about devices and Chrome browsers managed within your organization" }, "https://www.googleapis.com/auth/chrome.management.telemetry.readonly": { -"description": "See basic device and telemetry information collected from Chrome OS devices or users managed within your organization" +"description": "See basic device and telemetry information collected from ChromeOS devices or users managed within your organization" } } } @@ -1172,7 +1172,7 @@ } } }, -"revision": "20240815", +"revision": "20241003", "rootUrl": "https://chromemanagement.googleapis.com/", "schemas": { "GoogleChromeManagementV1AndroidAppInfo": { @@ -3682,7 +3682,8 @@ "APPLICATION_LAUNCH_SOURCE_FIRST_RUN", "APPLICATION_LAUNCH_SOURCE_WELCOME_TOUR", "APPLICATION_LAUNCH_SOURCE_FOCUS_MODE", -"APPLICATION_LAUNCH_SOURCE_SPARKY" +"APPLICATION_LAUNCH_SOURCE_SPARKY", 
+"APPLICATION_LAUNCH_SOURCE_NAVIGATION_CAPTURING" ], "enumDescriptions": [ "Application launch source unknown.", @@ -3726,7 +3727,8 @@ "Count first-run Help app launches separately so that we can understand the number of user-triggered launches.", "Application launched from welcome tour.", "Applicationed launched from focus panel.", -"Application launched from experimental feature Sparky." +"Application launched from experimental feature Sparky.", +"Application launched from navigation capturing." ], "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/chromepolicy.v1.json b/googleapiclient/discovery_cache/documents/chromepolicy.v1.json index 07475aa4284..c6388b3fc27 100644 --- a/googleapiclient/discovery_cache/documents/chromepolicy.v1.json +++ b/googleapiclient/discovery_cache/documents/chromepolicy.v1.json @@ -3,10 +3,10 @@ "oauth2": { "scopes": { "https://www.googleapis.com/auth/chrome.management.policy": { -"description": "See, edit, create or delete policies applied to Chrome OS and Chrome Browsers managed within your organization" +"description": "See, edit, create or delete policies applied to ChromeOS and Chrome Browsers managed within your organization" }, "https://www.googleapis.com/auth/chrome.management.policy.readonly": { -"description": "See policies applied to Chrome OS and Chrome Browsers managed within your organization" +"description": "See policies applied to ChromeOS and Chrome Browsers managed within your organization" } } } @@ -557,7 +557,7 @@ } } }, -"revision": "20240729", +"revision": "20241004", "rootUrl": "https://chromepolicy.googleapis.com/", "schemas": { "GoogleChromePolicyVersionsV1AdditionalTargetKeyName": { diff --git a/googleapiclient/discovery_cache/documents/clouddeploy.v1.json b/googleapiclient/discovery_cache/documents/clouddeploy.v1.json index c6b768c8ad4..8b15ba8322c 100644 --- a/googleapiclient/discovery_cache/documents/clouddeploy.v1.json +++ 
b/googleapiclient/discovery_cache/documents/clouddeploy.v1.json @@ -2289,7 +2289,7 @@ } } }, -"revision": "20240920", +"revision": "20240929", "rootUrl": "https://clouddeploy.googleapis.com/", "schemas": { "AbandonReleaseRequest": { @@ -2731,7 +2731,7 @@ true }, "policyViolation": { "$ref": "PolicyViolation", -"description": "Output only. Contains information about what policies prevented the `AutomationRun` to proceed.", +"description": "Output only. Contains information about what policies prevented the `AutomationRun` from proceeding.", "readOnly": true }, "promoteReleaseOperation": { @@ -3627,7 +3627,7 @@ true "type": "object" }, "DeployPolicy": { -"description": "A `DeployPolicy` resource in the Cloud Deploy API. A `DeployPolicy` inhibits manual or automation driven actions within a Delivery Pipeline or Target.", +"description": "A `DeployPolicy` resource in the Cloud Deploy API. A `DeployPolicy` inhibits manual or automation-driven actions within a Delivery Pipeline or Target.", "id": "DeployPolicy", "properties": { "annotations": { @@ -5517,10 +5517,31 @@ true }, "type": "object" }, +"RepairPhaseConfig": { +"description": "Configuration of the repair phase.", +"id": "RepairPhaseConfig", +"properties": { +"retry": { +"$ref": "Retry", +"description": "Optional. Retries a failed job." +}, +"rollback": { +"$ref": "Rollback", +"description": "Optional. Rolls back a `Rollout`." +} +}, +"type": "object" +}, "RepairRolloutOperation": { "description": "Contains the information for an automated `repair rollout` operation.", "id": "RepairRolloutOperation", "properties": { +"currentRepairPhaseIndex": { +"description": "Output only. The index of the current repair action in the repair sequence.", +"format": "int64", +"readOnly": true, +"type": "string" +}, "jobId": { "description": "Output only. The job ID for the Job to repair.", "readOnly": true, @@ -5566,6 +5587,51 @@ true "type": "string" }, "type": "array" +}, +"phases": { +"description": "Optional. 
Phases within which jobs are subject to automatic repair actions on failure. Proceeds only after phase name matched any one in the list, or for all phases if unspecified. This value must consist of lower-case letters, numbers, and hyphens, start with a letter and end with a letter or a number, and have a max length of 63 characters. In other words, it must match the following regex: `^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$`.", +"items": { +"type": "string" +}, +"type": "array" +}, +"repairPhases": { +"description": "Required. Defines the types of automatic repair phases for failed jobs.", +"items": { +"$ref": "RepairPhaseConfig" +}, +"type": "array" +} +}, +"type": "object" +}, +"Retry": { +"description": "Retries the failed job.", +"id": "Retry", +"properties": { +"attempts": { +"description": "Required. Total number of retries. Retry is skipped if set to 0; The minimum value is 1, and the maximum value is 10.", +"format": "int64", +"type": "string" +}, +"backoffMode": { +"description": "Optional. The pattern of how wait time will be increased. Default is linear. Backoff mode will be ignored if `wait` is 0.", +"enum": [ +"BACKOFF_MODE_UNSPECIFIED", +"BACKOFF_MODE_LINEAR", +"BACKOFF_MODE_EXPONENTIAL" +], +"enumDescriptions": [ +"No WaitMode is specified.", +"Increases the wait time linearly.", +"Increases the wait time exponentially." +], +"type": "string" +}, +"wait": { +"description": "Optional. How long to wait for the first retry. Default is 0, and the maximum value is 14d.", +"format": "google-duration", +"type": "string" } }, "type": "object" @@ -5681,6 +5747,21 @@ true }, "type": "object" }, +"Rollback": { +"description": "Rolls back a `Rollout`.", +"id": "Rollback", +"properties": { +"destinationPhase": { +"description": "Optional. The starting phase ID for the `Rollout`. If unspecified, the `Rollout` will start in the stable phase.", +"type": "string" +}, +"disableRollbackIfRolloutPending": { +"description": "Optional. 
If pending rollout exists on the target, the rollback operation will be aborted.", +"type": "boolean" +} +}, +"type": "object" +}, "RollbackAttempt": { "description": "RollbackAttempt represents an action of rolling back a Cloud Deploy 'Target'.", "id": "RollbackAttempt", @@ -5690,6 +5771,11 @@ true "readOnly": true, "type": "string" }, +"disableRollbackIfRolloutPending": { +"description": "Output only. If active rollout exists on the target, abort this rollback.", +"readOnly": true, +"type": "boolean" +}, "rolloutId": { "description": "Output only. ID of the rollback `Rollout` to create.", "readOnly": true, @@ -5794,6 +5880,11 @@ true "description": "A `Rollout` resource in the Cloud Deploy API. A `Rollout` contains information around a specific deployment to a `Target`.", "id": "Rollout", "properties": { +"activeRepairAutomationRun": { +"description": "Output only. The AutomationRun actively repairing the rollout.", +"readOnly": true, +"type": "string" +}, "annotations": { "additionalProperties": { "type": "string" @@ -6640,7 +6731,7 @@ true "type": "object" }, "TargetAttribute": { -"description": "Contains criteria for selecting Targets.", +"description": "Contains criteria for selecting Targets. This could be used to select targets for a Deploy Policy or for an Automation.", "id": "TargetAttribute", "properties": { "id": { @@ -6887,7 +6978,7 @@ true "type": "object" }, "TimeWindows": { -"description": "Time windows within which actions are restricted.", +"description": "Time windows within which actions are restricted. 
See the [documentation](https://cloud.google.com/deploy/docs/deploy-policy#dates_times) for more information on how to configure dates/times.", "id": "TimeWindows", "properties": { "oneTimeWindows": { diff --git a/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json index d846dc03093..c206e6144c5 100644 --- a/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json @@ -2015,7 +2015,7 @@ } } }, -"revision": "20240924", +"revision": "20241001", "rootUrl": "https://cloudidentity.googleapis.com/", "schemas": { "AddIdpCredentialOperationMetadata": { @@ -4806,7 +4806,7 @@ "type": "object" }, "PosixGroup": { -"description": "POSIX Group definition to represent a group in a POSIX compliant system.", +"description": "POSIX Group definition to represent a group in a POSIX compliant system. Caution: POSIX groups are deprecated. As of September 26, 2024, you can no longer create new POSIX groups. For more information, see https://cloud.google.com/identity/docs/deprecations/posix-groups", "id": "PosixGroup", "properties": { "gid": { diff --git a/googleapiclient/discovery_cache/documents/cloudkms.v1.json b/googleapiclient/discovery_cache/documents/cloudkms.v1.json index 7e8e2d6306f..77975ce8c0e 100644 --- a/googleapiclient/discovery_cache/documents/cloudkms.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudkms.v1.json @@ -880,7 +880,7 @@ "type": "string" }, "pageSize": { -"description": "Optional. Optional limit on the number of KeyHandles to include in the response. The service may return fewer than this value. Further KeyHandles can subsequently be obtained by including the ListKeyHandlesResponse.next_page_token in a subsequent request. If unspecified, at most KeyHandles 100 will be returned.", +"description": "Optional. Optional limit on the number of KeyHandles to include in the response. 
The service may return fewer than this value. Further KeyHandles can subsequently be obtained by including the ListKeyHandlesResponse.next_page_token in a subsequent request. If unspecified, at most 100 KeyHandles will be returned.", "format": "int32", "location": "query", "type": "integer" @@ -2132,7 +2132,7 @@ } } }, -"revision": "20240918", +"revision": "20240926", "rootUrl": "https://cloudkms.googleapis.com/", "schemas": { "AsymmetricDecryptRequest": { diff --git a/googleapiclient/discovery_cache/documents/container.v1.json b/googleapiclient/discovery_cache/documents/container.v1.json index b196386c3c4..6866be40b0e 100644 --- a/googleapiclient/discovery_cache/documents/container.v1.json +++ b/googleapiclient/discovery_cache/documents/container.v1.json @@ -2540,7 +2540,7 @@ } } }, -"revision": "20240905", +"revision": "20240912", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -3466,6 +3466,10 @@ "readOnly": true, "type": "string" }, +"userManagedKeysConfig": { +"$ref": "UserManagedKeysConfig", +"description": "The Custom keys configuration for the cluster." +}, "verticalPodAutoscaling": { "$ref": "VerticalPodAutoscaling", "description": "Cluster-level Vertical Pod Autoscaling configuration." @@ -3833,6 +3837,10 @@ "removedAdditionalPodRangesConfig": { "$ref": "AdditionalPodRangesConfig", "description": "The additional pod ranges that are to be removed from the cluster. The pod ranges specified here must have been specified earlier in the 'additional_pod_ranges_config' argument." +}, +"userManagedKeysConfig": { +"$ref": "UserManagedKeysConfig", +"description": "The Custom keys configuration for the cluster." } }, "type": "object" @@ -4896,7 +4904,9 @@ false "WORKLOADS", "APISERVER", "SCHEDULER", -"CONTROLLER_MANAGER" +"CONTROLLER_MANAGER", +"KCP_SSHD", +"KCP_CONNECTION" ], "enumDescriptions": [ "Default value. 
This shouldn't be used.", @@ -4904,7 +4914,9 @@ false "workloads", "kube-apiserver", "kube-scheduler", -"kube-controller-manager" +"kube-controller-manager", +"kcp-sshd", +"kcp connection logs" ], "type": "string" }, @@ -5421,6 +5433,21 @@ false "description": "Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard'", "type": "string" }, +"effectiveCgroupMode": { +"description": "Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version.", +"enum": [ +"EFFECTIVE_CGROUP_MODE_UNSPECIFIED", +"EFFECTIVE_CGROUP_MODE_V1", +"EFFECTIVE_CGROUP_MODE_V2" +], +"enumDescriptions": [ +"EFFECTIVE_CGROUP_MODE_UNSPECIFIED means the cgroup configuration for the node pool is unspecified, i.e. the node pool is a Windows node pool.", +"CGROUP_MODE_V1 means the node pool is configured to use cgroupv1 for the cgroup configuration.", +"CGROUP_MODE_V2 means the node pool is configured to use cgroupv2 for the cgroup configuration." +], +"readOnly": true, +"type": "string" +}, "enableConfidentialStorage": { "description": "Optional. 
Reserved for future use.", "type": "boolean" @@ -7889,6 +7916,51 @@ false }, "type": "object" }, +"UserManagedKeysConfig": { +"description": "UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster.", +"id": "UserManagedKeysConfig", +"properties": { +"aggregationCa": { +"description": "The Certificate Authority Service caPool to use for the aggregation CA in this cluster.", +"type": "string" +}, +"clusterCa": { +"description": "The Certificate Authority Service caPool to use for the cluster CA in this cluster.", +"type": "string" +}, +"controlPlaneDiskEncryptionKey": { +"description": "The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes.", +"type": "string" +}, +"etcdApiCa": { +"description": "Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster.", +"type": "string" +}, +"etcdPeerCa": { +"description": "Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster.", +"type": "string" +}, +"gkeopsEtcdBackupEncryptionKey": { +"description": "Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups.", +"type": "string" +}, +"serviceAccountSigningKeys": { +"description": "The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}`", +"items": { +"type": "string" +}, +"type": "array" +}, +"serviceAccountVerificationKeys": { +"description": "The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. 
Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}`", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, "VerticalPodAutoscaling": { "description": "VerticalPodAutoscaling contains global, per-cluster information required by Vertical Pod Autoscaler to automatically adjust the resources of pods controlled by it.", "id": "VerticalPodAutoscaling", diff --git a/googleapiclient/discovery_cache/documents/container.v1beta1.json b/googleapiclient/discovery_cache/documents/container.v1beta1.json index 98cdc744a22..99202055169 100644 --- a/googleapiclient/discovery_cache/documents/container.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/container.v1beta1.json @@ -2565,7 +2565,7 @@ } } }, -"revision": "20240905", +"revision": "20240912", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -3602,6 +3602,10 @@ "readOnly": true, "type": "string" }, +"userManagedKeysConfig": { +"$ref": "UserManagedKeysConfig", +"description": "The Custom keys configuration for the cluster." +}, "verticalPodAutoscaling": { "$ref": "VerticalPodAutoscaling", "description": "Cluster-level Vertical Pod Autoscaling configuration." @@ -4038,6 +4042,10 @@ "removedAdditionalPodRangesConfig": { "$ref": "AdditionalPodRangesConfig", "description": "The additional pod ranges that are to be removed from the cluster. The pod ranges specified here must have been specified earlier in the 'additional_pod_ranges_config' argument." +}, +"userManagedKeysConfig": { +"$ref": "UserManagedKeysConfig", +"description": "The Custom keys configuration for the cluster." } }, "type": "object" @@ -5248,7 +5256,9 @@ false "WORKLOADS", "APISERVER", "SCHEDULER", -"CONTROLLER_MANAGER" +"CONTROLLER_MANAGER", +"KCP_SSHD", +"KCP_CONNECTION" ], "enumDescriptions": [ "Default value. 
This shouldn't be used.", @@ -5256,7 +5266,9 @@ false "workloads", "kube-apiserver", "kube-scheduler", -"kube-controller-manager" +"kube-controller-manager", +"kcp-sshd", +"kcp connection logs" ], "type": "string" }, @@ -5810,6 +5822,21 @@ false "description": "Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced') If unspecified, the default disk type is 'pd-standard'", "type": "string" }, +"effectiveCgroupMode": { +"description": "Output only. effective_cgroup_mode is the cgroup mode actually used by the node pool. It is determined by the cgroup mode specified in the LinuxNodeConfig or the default cgroup mode based on the cluster creation version.", +"enum": [ +"EFFECTIVE_CGROUP_MODE_UNSPECIFIED", +"EFFECTIVE_CGROUP_MODE_V1", +"EFFECTIVE_CGROUP_MODE_V2" +], +"enumDescriptions": [ +"EFFECTIVE_CGROUP_MODE_UNSPECIFIED means the cgroup configuration for the node pool is unspecified, i.e. the node pool is a Windows node pool.", +"CGROUP_MODE_V1 means the node pool is configured to use cgroupv1 for the cgroup configuration.", +"CGROUP_MODE_V2 means the node pool is configured to use cgroupv2 for the cgroup configuration." +], +"readOnly": true, +"type": "string" +}, "enableConfidentialStorage": { "description": "Optional. 
Reserved for future use.", "type": "boolean" @@ -8402,6 +8429,51 @@ false }, "type": "object" }, +"UserManagedKeysConfig": { +"description": "UserManagedKeysConfig holds the resource address to Keys which are used for signing certs and token that are used for communication within cluster.", +"id": "UserManagedKeysConfig", +"properties": { +"aggregationCa": { +"description": "The Certificate Authority Service caPool to use for the aggregation CA in this cluster.", +"type": "string" +}, +"clusterCa": { +"description": "The Certificate Authority Service caPool to use for the cluster CA in this cluster.", +"type": "string" +}, +"controlPlaneDiskEncryptionKey": { +"description": "The Cloud KMS cryptoKey to use for Confidential Hyperdisk on the control plane nodes.", +"type": "string" +}, +"etcdApiCa": { +"description": "Resource path of the Certificate Authority Service caPool to use for the etcd API CA in this cluster.", +"type": "string" +}, +"etcdPeerCa": { +"description": "Resource path of the Certificate Authority Service caPool to use for the etcd peer CA in this cluster.", +"type": "string" +}, +"gkeopsEtcdBackupEncryptionKey": { +"description": "Resource path of the Cloud KMS cryptoKey to use for encryption of internal etcd backups.", +"type": "string" +}, +"serviceAccountSigningKeys": { +"description": "The Cloud KMS cryptoKeyVersions to use for signing service account JWTs issued by this cluster. Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}`", +"items": { +"type": "string" +}, +"type": "array" +}, +"serviceAccountVerificationKeys": { +"description": "The Cloud KMS cryptoKeyVersions to use for verifying service account JWTs issued by this cluster. 
Format: `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{cryptoKey}/cryptoKeyVersions/{cryptoKeyVersion}`", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, "VerticalPodAutoscaling": { "description": "VerticalPodAutoscaling contains global, per-cluster information required by Vertical Pod Autoscaler to automatically adjust the resources of pods controlled by it.", "id": "VerticalPodAutoscaling", diff --git a/googleapiclient/discovery_cache/documents/datamigration.v1.json b/googleapiclient/discovery_cache/documents/datamigration.v1.json index 2b1740eda2d..982436b866d 100644 --- a/googleapiclient/discovery_cache/documents/datamigration.v1.json +++ b/googleapiclient/discovery_cache/documents/datamigration.v1.json @@ -2218,7 +2218,7 @@ } } }, -"revision": "20240914", +"revision": "20240930", "rootUrl": "https://datamigration.googleapis.com/", "schemas": { "AlloyDbConnectionProfile": { @@ -2486,6 +2486,21 @@ }, "type": "object" }, +"BinaryLogParser": { +"description": "Configuration to use Binary Log Parser CDC technique.", +"id": "BinaryLogParser", +"properties": { +"logFileDirectories": { +"$ref": "LogFileDirectories", +"description": "Use Oracle directories." +}, +"oracleAsmLogFileAccess": { +"$ref": "OracleAsmLogFileAccess", +"description": "Use Oracle ASM." +} +}, +"type": "object" +}, "Binding": { "description": "Associates `members`, or principals, with a `role`.", "id": "Binding", @@ -4322,6 +4337,27 @@ }, "type": "object" }, +"LogFileDirectories": { +"description": "Configuration to specify the Oracle directories to access the log files.", +"id": "LogFileDirectories", +"properties": { +"archivedLogDirectory": { +"description": "Required. Oracle directory for archived logs.", +"type": "string" +}, +"onlineLogDirectory": { +"description": "Required. 
Oracle directory for online logs.", +"type": "string" +} +}, +"type": "object" +}, +"LogMiner": { +"description": "Configuration to use LogMiner CDC method.", +"id": "LogMiner", +"properties": {}, +"type": "object" +}, "MachineConfig": { "description": "MachineConfig describes the configuration of a machine.", "id": "MachineConfig", @@ -4602,6 +4638,10 @@ "description": "The name (URI) of this migration job resource, in the form of: projects/{project}/locations/{location}/migrationJobs/{migrationJob}.", "type": "string" }, +"oracleToPostgresConfig": { +"$ref": "OracleToPostgresConfig", +"description": "Configuration for heterogeneous **Oracle to Cloud SQL for PostgreSQL** and **Oracle to AlloyDB for PostgreSQL** migrations." +}, "performanceConfig": { "$ref": "PerformanceConfig", "description": "Optional. Data dump parallelism settings used by the migration." @@ -4991,6 +5031,12 @@ }, "type": "object" }, +"OracleAsmLogFileAccess": { +"description": "Configuration to use Oracle ASM to access the log files.", +"id": "OracleAsmLogFileAccess", +"properties": {}, +"type": "object" +}, "OracleConnectionProfile": { "description": "Specifies connection parameters required specifically for Oracle databases.", "id": "OracleConnectionProfile", @@ -5044,6 +5090,55 @@ }, "type": "object" }, +"OracleSourceConfig": { +"description": "Configuration for Oracle as a source in a migration.", +"id": "OracleSourceConfig", +"properties": { +"binaryLogParser": { +"$ref": "BinaryLogParser", +"description": "Use Binary Log Parser." +}, +"cdcStartPosition": { +"description": "Optional. The schema change number (SCN) to start CDC data migration from.", +"format": "int64", +"type": "string" +}, +"logMiner": { +"$ref": "LogMiner", +"description": "Use LogMiner." +}, +"maxConcurrentCdcConnections": { +"description": "Optional. 
Maximum number of connections Database Migration Service will open to the source for CDC phase.", +"format": "int32", +"type": "integer" +}, +"maxConcurrentFullDumpConnections": { +"description": "Optional. Maximum number of connections Database Migration Service will open to the source for full dump phase.", +"format": "int32", +"type": "integer" +}, +"skipFullDump": { +"description": "Optional. Whether to skip full dump or not.", +"type": "boolean" +} +}, +"type": "object" +}, +"OracleToPostgresConfig": { +"description": "Configuration for heterogeneous **Oracle to Cloud SQL for PostgreSQL** and **Oracle to AlloyDB for PostgreSQL** migrations.", +"id": "OracleToPostgresConfig", +"properties": { +"oracleSourceConfig": { +"$ref": "OracleSourceConfig", +"description": "Optional. Configuration for Oracle source." +}, +"postgresDestinationConfig": { +"$ref": "PostgresDestinationConfig", +"description": "Optional. Configuration for Postgres destination." +} +}, +"type": "object" +}, "PackageEntity": { "description": "Package's parent is a schema.", "id": "PackageEntity", @@ -5212,6 +5307,23 @@ }, "type": "object" }, +"PostgresDestinationConfig": { +"description": "Configuration for Postgres as a destination in a migration.", +"id": "PostgresDestinationConfig", +"properties": { +"maxConcurrentConnections": { +"description": "Optional. Maximum number of connections Database Migration Service will open to the destination for data migration.", +"format": "int32", +"type": "integer" +}, +"transactionTimeout": { +"description": "Optional. 
Timeout for data migration transactions.", +"format": "google-duration", +"type": "string" +} +}, +"type": "object" +}, "PrimaryInstanceSettings": { "description": "Settings for the cluster's primary instance", "id": "PrimaryInstanceSettings", diff --git a/googleapiclient/discovery_cache/documents/dataplex.v1.json b/googleapiclient/discovery_cache/documents/dataplex.v1.json index d5effb20a6b..82ef0efa911 100644 --- a/googleapiclient/discovery_cache/documents/dataplex.v1.json +++ b/googleapiclient/discovery_cache/documents/dataplex.v1.json @@ -2480,7 +2480,7 @@ "type": "boolean" }, "aspectKeys": { -"description": "Optional. The map keys of the Aspects which the service should modify. It supports the following syntaxes: - matches an aspect of the given type and empty path. @path - matches an aspect of the given type and specified path. * - matches aspects of the given type for all paths. *@path - matches aspects of all types on the given path.The service will not remove existing aspects matching the syntax unless delete_missing_aspects is set to true.If this field is left empty, the service treats it as specifying exactly those Aspects present in the request.", +"description": "Optional. The map keys of the Aspects which the service should modify. It supports the following syntaxes: - matches an aspect of the given type and empty path. @path - matches an aspect of the given type and specified path. For example, to attach an aspect to a field that is specified by the schema aspect, the path should have the format Schema.. * - matches aspects of the given type for all paths. 
*@path - matches aspects of all types on the given path.The service will not remove existing aspects matching the syntax unless delete_missing_aspects is set to true.If this field is left empty, the service treats it as specifying exactly those Aspects present in the request.", "location": "query", "repeated": true, "type": "string" @@ -5997,7 +5997,7 @@ } } }, -"revision": "20240914", +"revision": "20240925", "rootUrl": "https://dataplex.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/dataproc.v1.json b/googleapiclient/discovery_cache/documents/dataproc.v1.json index 183de5022fe..01574e47a9a 100644 --- a/googleapiclient/discovery_cache/documents/dataproc.v1.json +++ b/googleapiclient/discovery_cache/documents/dataproc.v1.json @@ -33,6 +33,61 @@ }, { "description": "Regional Endpoint", +"endpointUrl": "https://dataproc.us-central2.rep.googleapis.com/", +"location": "us-central2" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://dataproc.us-east1.rep.googleapis.com/", +"location": "us-east1" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://dataproc.us-east4.rep.googleapis.com/", +"location": "us-east4" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://dataproc.us-east5.rep.googleapis.com/", +"location": "us-east5" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://dataproc.us-east7.rep.googleapis.com/", +"location": "us-east7" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://dataproc.us-south1.rep.googleapis.com/", +"location": "us-south1" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://dataproc.us-west1.rep.googleapis.com/", +"location": "us-west1" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://dataproc.us-west2.rep.googleapis.com/", +"location": "us-west2" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://dataproc.us-west3.rep.googleapis.com/", +"location": 
"us-west3" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://dataproc.us-west8.rep.googleapis.com/", +"location": "us-west8" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://dataproc.europe-west8.rep.googleapis.com/", +"location": "europe-west8" +}, +{ +"description": "Regional Endpoint", "endpointUrl": "https://dataproc.me-central2.rep.googleapis.com/", "location": "me-central2" } @@ -525,795 +580,1094 @@ "https://www.googleapis.com/auth/cloud-platform" ] } -} }, -"operations": { +"resources": { +"sparkApplications": { "methods": { -"cancel": { -"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel", -"httpMethod": "POST", -"id": "dataproc.projects.locations.operations.cancel", +"access": { +"description": "Obtain high level information corresponding to a single Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:access", +"httpMethod": "GET", +"id": "dataproc.projects.locations.batches.sparkApplications.access", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "The name of the operation resource to be cancelled.", +"description": "Required. 
The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" +}, +"parent": { +"description": "Required. Parent (Batch) resource reference.", +"location": "query", +"type": "string" } }, -"path": "v1/{+name}:cancel", +"path": "v1/{+name}:access", "response": { -"$ref": "Empty" +"$ref": "AccessSparkApplicationResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"delete": { -"description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", -"httpMethod": "DELETE", -"id": "dataproc.projects.locations.operations.delete", +"accessEnvironmentInfo": { +"description": "Obtain environment details for a Spark Application", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:accessEnvironmentInfo", +"httpMethod": "GET", +"id": "dataproc.projects.locations.batches.sparkApplications.accessEnvironmentInfo", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "The name of the operation resource to be deleted.", +"description": "Required. 
The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" +}, +"parent": { +"description": "Required. Parent (Batch) resource reference.", +"location": "query", +"type": "string" } }, -"path": "v1/{+name}", +"path": "v1/{+name}:accessEnvironmentInfo", "response": { -"$ref": "Empty" +"$ref": "AccessSparkApplicationEnvironmentInfoResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"get": { -"description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", +"accessJob": { +"description": "Obtain data corresponding to a spark job for a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:accessJob", "httpMethod": "GET", -"id": "dataproc.projects.locations.operations.get", +"id": "dataproc.projects.locations.batches.sparkApplications.accessJob", "parameterOrder": [ "name" ], "parameters": { +"jobId": { +"description": "Required. Job ID to fetch data for.", +"format": "int64", +"location": "query", +"type": "string" +}, "name": { -"description": "The name of the operation resource.", +"description": "Required. 
The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" +}, +"parent": { +"description": "Required. Parent (Batch) resource reference.", +"location": "query", +"type": "string" } }, -"path": "v1/{+name}", +"path": "v1/{+name}:accessJob", "response": { -"$ref": "Operation" +"$ref": "AccessSparkApplicationJobResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"list": { -"description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", +"accessSqlPlan": { +"description": "Obtain Spark Plan Graph for a Spark Application SQL execution. Limits the number of clusters returned as part of the graph to 10000.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:accessSqlPlan", "httpMethod": "GET", -"id": "dataproc.projects.locations.operations.list", +"id": "dataproc.projects.locations.batches.sparkApplications.accessSqlPlan", "parameterOrder": [ "name" ], "parameters": { -"filter": { -"description": "The standard list filter.", +"executionId": { +"description": "Required. Execution ID", +"format": "int64", "location": "query", "type": "string" }, "name": { -"description": "The name of the operation's parent resource.", +"description": "Required. 
The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/operations$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" }, -"pageSize": { -"description": "The standard list page size.", -"format": "int32", -"location": "query", -"type": "integer" -}, -"pageToken": { -"description": "The standard list page token.", +"parent": { +"description": "Required. Parent (Batch) resource reference.", "location": "query", "type": "string" } }, -"path": "v1/{+name}", +"path": "v1/{+name}:accessSqlPlan", "response": { -"$ref": "ListOperationsResponse" +"$ref": "AccessSparkApplicationSqlSparkPlanGraphResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] -} -} }, -"sessionTemplates": { -"methods": { -"create": { -"description": "Create a session template synchronously.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates", -"httpMethod": "POST", -"id": "dataproc.projects.locations.sessionTemplates.create", +"accessSqlQuery": { +"description": "Obtain data corresponding to a particular SQL Query for a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:accessSqlQuery", +"httpMethod": "GET", +"id": "dataproc.projects.locations.batches.sparkApplications.accessSqlQuery", "parameterOrder": [ -"parent" +"name" ], "parameters": { -"parent": { -"description": "Required. The parent resource where this session template will be created.", +"details": { +"description": "Optional. Lists/ hides details of Spark plan nodes. True is set to list and false to hide.", +"location": "query", +"type": "boolean" +}, +"executionId": { +"description": "Required. 
Execution ID", +"format": "int64", +"location": "query", +"type": "string" +}, +"name": { +"description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" -} }, -"path": "v1/{+parent}/sessionTemplates", -"request": { -"$ref": "SessionTemplate" +"parent": { +"description": "Required. Parent (Batch) resource reference.", +"location": "query", +"type": "string" +}, +"planDescription": { +"description": "Optional. Enables/ disables physical plan description on demand", +"location": "query", +"type": "boolean" +} }, +"path": "v1/{+name}:accessSqlQuery", "response": { -"$ref": "SessionTemplate" +"$ref": "AccessSparkApplicationSqlQueryResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"delete": { -"description": "Deletes a session template.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates/{sessionTemplatesId}", -"httpMethod": "DELETE", -"id": "dataproc.projects.locations.sessionTemplates.delete", +"accessStageAttempt": { +"description": "Obtain data corresponding to a spark stage attempt for a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:accessStageAttempt", +"httpMethod": "GET", +"id": "dataproc.projects.locations.batches.sparkApplications.accessStageAttempt", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Required. The name of the session template resource to delete.", +"description": "Required. 
The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/sessionTemplates/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" +}, +"parent": { +"description": "Required. Parent (Batch) resource reference.", +"location": "query", +"type": "string" +}, +"stageAttemptId": { +"description": "Required. Stage Attempt ID", +"format": "int32", +"location": "query", +"type": "integer" +}, +"stageId": { +"description": "Required. Stage ID", +"format": "int64", +"location": "query", +"type": "string" +}, +"summaryMetricsMask": { +"description": "Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field", +"format": "google-fieldmask", +"location": "query", +"type": "string" } }, -"path": "v1/{+name}", +"path": "v1/{+name}:accessStageAttempt", "response": { -"$ref": "Empty" +"$ref": "AccessSparkApplicationStageAttemptResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"get": { -"description": "Gets the resource representation for a session template.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates/{sessionTemplatesId}", +"accessStageRddGraph": { +"description": "Obtain RDD operation graph for a Spark Application Stage. 
Limits the number of clusters returned as part of the graph to 10000.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:accessStageRddGraph", "httpMethod": "GET", -"id": "dataproc.projects.locations.sessionTemplates.get", +"id": "dataproc.projects.locations.batches.sparkApplications.accessStageRddGraph", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Required. The name of the session template to retrieve.", +"description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/sessionTemplates/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" +}, +"parent": { +"description": "Required. Parent (Batch) resource reference.", +"location": "query", +"type": "string" +}, +"stageId": { +"description": "Required. 
Stage ID", +"format": "int64", +"location": "query", +"type": "string" } }, -"path": "v1/{+name}", +"path": "v1/{+name}:accessStageRddGraph", "response": { -"$ref": "SessionTemplate" +"$ref": "AccessSparkApplicationStageRddOperationGraphResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"list": { -"description": "Lists session templates.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates", +"search": { +"description": "Obtain high level information and list of Spark Applications corresponding to a batch", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications:search", "httpMethod": "GET", -"id": "dataproc.projects.locations.sessionTemplates.list", +"id": "dataproc.projects.locations.batches.sparkApplications.search", "parameterOrder": [ "parent" ], "parameters": { -"filter": { -"description": "Optional. A filter for the session templates to return in the response. Filters are case sensitive and have the following syntax:field = value AND field = value ...", +"applicationStatus": { +"description": "Optional. Search only applications in the chosen state.", +"enum": [ +"APPLICATION_STATUS_UNSPECIFIED", +"APPLICATION_STATUS_RUNNING", +"APPLICATION_STATUS_COMPLETED" +], +"enumDescriptions": [ +"", +"", +"" +], +"location": "query", +"type": "string" +}, +"maxEndTime": { +"description": "Optional. Latest end timestamp to list.", +"format": "google-datetime", +"location": "query", +"type": "string" +}, +"maxTime": { +"description": "Optional. Latest start timestamp to list.", +"format": "google-datetime", +"location": "query", +"type": "string" +}, +"minEndTime": { +"description": "Optional. Earliest end timestamp to list.", +"format": "google-datetime", +"location": "query", +"type": "string" +}, +"minTime": { +"description": "Optional. 
Earliest start timestamp to list.", +"format": "google-datetime", "location": "query", "type": "string" }, "pageSize": { -"description": "Optional. The maximum number of sessions to return in each response. The service may return fewer than this value.", +"description": "Optional. Maximum number of applications to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "Optional. A page token received from a previous ListSessions call. Provide this token to retrieve the subsequent page.", +"description": "Optional. A page token received from a previous SearchSparkApplications call. Provide this token to retrieve the subsequent page.", "location": "query", "type": "string" }, "parent": { -"description": "Required. The parent that owns this collection of session templates.", +"description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+parent}/sessionTemplates", +"path": "v1/{+parent}/sparkApplications:search", "response": { -"$ref": "ListSessionTemplatesResponse" +"$ref": "SearchSparkApplicationsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"patch": { -"description": "Updates the session template synchronously.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates/{sessionTemplatesId}", -"httpMethod": "PATCH", -"id": "dataproc.projects.locations.sessionTemplates.patch", +"searchExecutorStageSummary": { +"description": "Obtain executor summary with respect to a spark stage attempt.", +"flatPath": 
"v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:searchExecutorStageSummary", +"httpMethod": "GET", +"id": "dataproc.projects.locations.batches.sparkApplications.searchExecutorStageSummary", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Required. The resource name of the session template.", +"description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/sessionTemplates/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" -} }, -"path": "v1/{+name}", -"request": { -"$ref": "SessionTemplate" +"pageSize": { +"description": "Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A page token received from a previous AccessSparkApplicationExecutorsList call. Provide this token to retrieve the subsequent page.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. Parent (Batch) resource reference.", +"location": "query", +"type": "string" +}, +"stageAttemptId": { +"description": "Required. Stage Attempt ID", +"format": "int32", +"location": "query", +"type": "integer" +}, +"stageId": { +"description": "Required. 
Stage ID", +"format": "int64", +"location": "query", +"type": "string" +} }, +"path": "v1/{+name}:searchExecutorStageSummary", "response": { -"$ref": "SessionTemplate" +"$ref": "SearchSparkApplicationExecutorStageSummaryResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] -} -} }, -"sessions": { -"methods": { -"create": { -"description": "Create an interactive session asynchronously.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions", -"httpMethod": "POST", -"id": "dataproc.projects.locations.sessions.create", +"searchExecutors": { +"description": "Obtain data corresponding to executors for a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:searchExecutors", +"httpMethod": "GET", +"id": "dataproc.projects.locations.batches.sparkApplications.searchExecutors", "parameterOrder": [ -"parent" +"name" ], "parameters": { -"parent": { -"description": "Required. The parent resource where this session will be created.", +"executorStatus": { +"description": "Optional. Filter to select whether active/ dead or all executors should be selected.", +"enum": [ +"EXECUTOR_STATUS_UNSPECIFIED", +"EXECUTOR_STATUS_ACTIVE", +"EXECUTOR_STATUS_DEAD" +], +"enumDescriptions": [ +"", +"", +"" +], +"location": "query", +"type": "string" +}, +"name": { +"description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" }, -"requestId": { -"description": "Optional. A unique ID used to identify the request. 
If the service receives two CreateSessionRequests (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateSessionRequest)s with the same ID, the second request is ignored, and the first Session is created and stored in the backend.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"pageSize": { +"description": "Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A page token received from a previous AccessSparkApplicationExecutorsList call. Provide this token to retrieve the subsequent page.", "location": "query", "type": "string" }, -"sessionId": { -"description": "Required. The ID to use for the session, which becomes the final component of the session's resource name.This value must be 4-63 characters. Valid characters are /a-z-/.", +"parent": { +"description": "Required. Parent (Batch) resource reference.", "location": "query", "type": "string" } }, -"path": "v1/{+parent}/sessions", -"request": { -"$ref": "Session" -}, +"path": "v1/{+name}:searchExecutors", "response": { -"$ref": "Operation" +"$ref": "SearchSparkApplicationExecutorsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"delete": { -"description": "Deletes the interactive session resource. 
If the session is not in terminal state, it is terminated, and then deleted.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}", -"httpMethod": "DELETE", -"id": "dataproc.projects.locations.sessions.delete", +"searchJobs": { +"description": "Obtain list of spark jobs corresponding to a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:searchJobs", +"httpMethod": "GET", +"id": "dataproc.projects.locations.batches.sparkApplications.searchJobs", "parameterOrder": [ "name" ], "parameters": { +"jobStatus": { +"description": "Optional. List only jobs in the specific state.", +"enum": [ +"JOB_EXECUTION_STATUS_UNSPECIFIED", +"JOB_EXECUTION_STATUS_RUNNING", +"JOB_EXECUTION_STATUS_SUCCEEDED", +"JOB_EXECUTION_STATUS_FAILED", +"JOB_EXECUTION_STATUS_UNKNOWN" +], +"enumDescriptions": [ +"", +"", +"", +"", +"" +], +"location": "query", +"type": "string" +}, "name": { -"description": "Required. The name of the session resource to delete.", +"description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" }, -"requestId": { -"description": "Optional. A unique ID used to identify the request. If the service receives two DeleteSessionRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteSessionRequest)s with the same ID, the second request is ignored.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). 
The maximum length is 40 characters.", +"pageSize": { +"description": "Optional. Maximum number of jobs to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A page token received from a previous SearchSparkApplicationJobs call. Provide this token to retrieve the subsequent page.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. Parent (Batch) resource reference.", "location": "query", "type": "string" } }, -"path": "v1/{+name}", +"path": "v1/{+name}:searchJobs", "response": { -"$ref": "Operation" +"$ref": "SearchSparkApplicationJobsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"get": { -"description": "Gets the resource representation for an interactive session.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}", +"searchSqlQueries": { +"description": "Obtain data corresponding to SQL Queries for a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:searchSqlQueries", "httpMethod": "GET", -"id": "dataproc.projects.locations.sessions.get", +"id": "dataproc.projects.locations.batches.sparkApplications.searchSqlQueries", "parameterOrder": [ "name" ], "parameters": { +"details": { +"description": "Optional. Lists/ hides details of Spark plan nodes. True is set to list and false to hide.", +"location": "query", +"type": "boolean" +}, "name": { -"description": "Required. The name of the session to retrieve.", +"description": "Required. 
The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" +}, +"pageSize": { +"description": "Optional. Maximum number of queries to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A page token received from a previous SearchSparkApplicationSqlQueries call. Provide this token to retrieve the subsequent page.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. Parent (Batch) resource reference.", +"location": "query", +"type": "string" +}, +"planDescription": { +"description": "Optional. 
Enables/ disables physical plan description on demand", +"location": "query", +"type": "boolean" } }, -"path": "v1/{+name}", +"path": "v1/{+name}:searchSqlQueries", "response": { -"$ref": "Session" +"$ref": "SearchSparkApplicationSqlQueriesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"list": { -"description": "Lists interactive sessions.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions", +"searchStageAttemptTasks": { +"description": "Obtain data corresponding to tasks for a spark stage attempt for a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:searchStageAttemptTasks", "httpMethod": "GET", -"id": "dataproc.projects.locations.sessions.list", +"id": "dataproc.projects.locations.batches.sparkApplications.searchStageAttemptTasks", "parameterOrder": [ -"parent" +"name" ], "parameters": { -"filter": { -"description": "Optional. A filter for the sessions to return in the response.A filter is a logical expression constraining the values of various fields in each session resource. Filters are case sensitive, and may contain multiple clauses combined with logical operators (AND, OR). Supported fields are session_id, session_uuid, state, create_time, and labels.Example: state = ACTIVE and create_time < \"2023-01-01T00:00:00Z\" is a filter for sessions in an ACTIVE state that were created before 2023-01-01. state = ACTIVE and labels.environment=production is a filter for sessions in an ACTIVE state that have a production environment label.See https://google.aip.dev/assets/misc/ebnf-filtering.txt for a detailed description of the filter syntax and a list of supported comparators.", -"location": "query", +"name": { +"description": "Required. 
The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", +"required": true, "type": "string" }, "pageSize": { -"description": "Optional. The maximum number of sessions to return in each response. The service may return fewer than this value.", +"description": "Optional. Maximum number of tasks to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "Optional. A page token received from a previous ListSessions call. Provide this token to retrieve the subsequent page.", +"description": "Optional. A page token received from a previous ListSparkApplicationStageAttemptTasks call. Provide this token to retrieve the subsequent page.", "location": "query", "type": "string" }, "parent": { -"description": "Required. The parent, which owns this collection of sessions.", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+$", -"required": true, +"description": "Required. Parent (Batch) resource reference.", +"location": "query", +"type": "string" +}, +"sortRuntime": { +"description": "Optional. Sort the tasks by runtime.", +"location": "query", +"type": "boolean" +}, +"stageAttemptId": { +"description": "Optional. Stage Attempt ID", +"format": "int32", +"location": "query", +"type": "integer" +}, +"stageId": { +"description": "Optional. Stage ID", +"format": "int64", +"location": "query", +"type": "string" +}, +"taskStatus": { +"description": "Optional. 
List only tasks in the state.", +"enum": [ +"TASK_STATUS_UNSPECIFIED", +"TASK_STATUS_RUNNING", +"TASK_STATUS_SUCCESS", +"TASK_STATUS_FAILED", +"TASK_STATUS_KILLED", +"TASK_STATUS_PENDING" +], +"enumDescriptions": [ +"", +"", +"", +"", +"", +"" +], +"location": "query", "type": "string" } }, -"path": "v1/{+parent}/sessions", +"path": "v1/{+name}:searchStageAttemptTasks", "response": { -"$ref": "ListSessionsResponse" +"$ref": "SearchSparkApplicationStageAttemptTasksResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"terminate": { -"description": "Terminates the interactive session.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}:terminate", -"httpMethod": "POST", -"id": "dataproc.projects.locations.sessions.terminate", +"searchStageAttempts": { +"description": "Obtain data corresponding to a spark stage attempts for a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:searchStageAttempts", +"httpMethod": "GET", +"id": "dataproc.projects.locations.batches.sparkApplications.searchStageAttempts", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Required. The name of the session resource to terminate.", +"description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" -} }, -"path": "v1/{+name}:terminate", -"request": { -"$ref": "TerminateSessionRequest" +"pageSize": { +"description": "Optional. Maximum number of stage attempts (paging based on stage_attempt_id) to return in each response. The service may return fewer than this. 
The default page size is 10; the maximum page size is 100.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A page token received from a previous SearchSparkApplicationStageAttempts call. Provide this token to retrieve the subsequent page.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. Parent (Batch) resource reference.", +"location": "query", +"type": "string" +}, +"stageId": { +"description": "Required. Stage ID for which attempts are to be fetched", +"format": "int64", +"location": "query", +"type": "string" +}, +"summaryMetricsMask": { +"description": "Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} }, +"path": "v1/{+name}:searchStageAttempts", "response": { -"$ref": "Operation" +"$ref": "SearchSparkApplicationStageAttemptsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] -} -} }, -"workflowTemplates": { -"methods": { -"create": { -"description": "Creates new workflow template.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates", -"httpMethod": "POST", -"id": "dataproc.projects.locations.workflowTemplates.create", +"searchStages": { +"description": "Obtain data corresponding to stages for a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:searchStages", +"httpMethod": "GET", +"id": "dataproc.projects.locations.batches.sparkApplications.searchStages", "parameterOrder": [ -"parent" +"name" ], "parameters": { -"parent": { -"description": "Required. 
The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", +"name": { +"description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" -} }, -"path": "v1/{+parent}/workflowTemplates", -"request": { -"$ref": "WorkflowTemplate" +"pageSize": { +"description": "Optional. Maximum number of stages (paging based on stage_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A page token received from a previous FetchSparkApplicationStagesList call. Provide this token to retrieve the subsequent page.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. Parent (Batch) resource reference.", +"location": "query", +"type": "string" +}, +"stageStatus": { +"description": "Optional. List only stages in the given state.", +"enum": [ +"STAGE_STATUS_UNSPECIFIED", +"STAGE_STATUS_ACTIVE", +"STAGE_STATUS_COMPLETE", +"STAGE_STATUS_FAILED", +"STAGE_STATUS_PENDING", +"STAGE_STATUS_SKIPPED" +], +"enumDescriptions": [ +"", +"", +"", +"", +"", +"" +], +"location": "query", +"type": "string" +}, +"summaryMetricsMask": { +"description": "Optional. The list of summary metrics fields to include. 
Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} }, +"path": "v1/{+name}:searchStages", "response": { -"$ref": "WorkflowTemplate" +"$ref": "SearchSparkApplicationStagesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"delete": { -"description": "Deletes a workflow template. It does not cancel in-progress workflows.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}", -"httpMethod": "DELETE", -"id": "dataproc.projects.locations.workflowTemplates.delete", +"summarizeExecutors": { +"description": "Obtain summary of Executor Summary for a Spark Application", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:summarizeExecutors", +"httpMethod": "GET", +"id": "dataproc.projects.locations.batches.sparkApplications.summarizeExecutors", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", +"description": "Required. 
The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" }, -"version": { -"description": "Optional. The version of workflow template to delete. If specified, will only delete the template if the current server version matches specified version.", -"format": "int32", +"parent": { +"description": "Required. Parent (Batch) resource reference.", "location": "query", -"type": "integer" +"type": "string" } }, -"path": "v1/{+name}", +"path": "v1/{+name}:summarizeExecutors", "response": { -"$ref": "Empty" +"$ref": "SummarizeSparkApplicationExecutorsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"get": { -"description": "Retrieves the latest workflow template.Can retrieve previously instantiated template by specifying optional version parameter.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}", +"summarizeJobs": { +"description": "Obtain summary of Jobs for a Spark Application", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:summarizeJobs", "httpMethod": "GET", -"id": "dataproc.projects.locations.workflowTemplates.get", +"id": "dataproc.projects.locations.batches.sparkApplications.summarizeJobs", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", +"description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" }, -"version": { -"description": "Optional. The version of workflow template to retrieve. Only previously instantiated versions can be retrieved.If unspecified, retrieves the current version.", -"format": "int32", +"parent": { +"description": "Required. Parent (Batch) resource reference.", "location": "query", -"type": "integer" +"type": "string" } }, -"path": "v1/{+name}", +"path": "v1/{+name}:summarizeJobs", "response": { -"$ref": "WorkflowTemplate" +"$ref": "SummarizeSparkApplicationJobsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"getIamPolicy": { -"description": "Gets the access control policy for a resource. 
Returns an empty policy if the resource exists and does not have a policy set.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:getIamPolicy", -"httpMethod": "POST", -"id": "dataproc.projects.locations.workflowTemplates.getIamPolicy", +"summarizeStageAttemptTasks": { +"description": "Obtain summary of Tasks for a Spark Application Stage Attempt", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:summarizeStageAttemptTasks", +"httpMethod": "GET", +"id": "dataproc.projects.locations.batches.sparkApplications.summarizeStageAttemptTasks", "parameterOrder": [ -"resource" +"name" ], "parameters": { -"resource": { -"description": "REQUIRED: The resource for which the policy is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"name": { +"description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" -} }, -"path": "v1/{+resource}:getIamPolicy", -"request": { -"$ref": "GetIamPolicyRequest" +"parent": { +"description": "Required. Parent (Batch) resource reference.", +"location": "query", +"type": "string" }, +"stageAttemptId": { +"description": "Required. Stage Attempt ID", +"format": "int32", +"location": "query", +"type": "integer" +}, +"stageId": { +"description": "Required. 
Stage ID", +"format": "int64", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}:summarizeStageAttemptTasks", "response": { -"$ref": "Policy" +"$ref": "SummarizeSparkApplicationStageAttemptTasksResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"instantiate": { -"description": "Instantiates a template and begins execution.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:instantiate", -"httpMethod": "POST", -"id": "dataproc.projects.locations.workflowTemplates.instantiate", +"summarizeStages": { +"description": "Obtain summary of Stages for a Spark Application", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:summarizeStages", +"httpMethod": "GET", +"id": "dataproc.projects.locations.batches.sparkApplications.summarizeStages", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", +"description": "Required. The fully qualified name of the batch to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" -} }, -"path": "v1/{+name}:instantiate", -"request": { -"$ref": "InstantiateWorkflowTemplateRequest" +"parent": { +"description": "Required. Parent (Batch) resource reference.", +"location": "query", +"type": "string" +} }, +"path": "v1/{+name}:summarizeStages", "response": { -"$ref": "Operation" +"$ref": "SummarizeSparkApplicationStagesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"instantiateInline": { -"description": "Instantiates a template and begins execution.This method is equivalent to executing the sequence CreateWorkflowTemplate, InstantiateWorkflowTemplate, DeleteWorkflowTemplate.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). 
Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates:instantiateInline", +"write": { +"description": "Write wrapper objects from dataplane to spanner", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}/sparkApplications/{sparkApplicationsId}:write", "httpMethod": "POST", -"id": "dataproc.projects.locations.workflowTemplates.instantiateInline", +"id": "dataproc.projects.locations.batches.sparkApplications.write", "parameterOrder": [ -"parent" +"name" ], "parameters": { -"parent": { -"description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", +"name": { +"description": "Required. The fully qualified name of the spark application to write data about in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/batches/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" -}, -"requestId": { -"description": "Optional. A tag that prevents multiple concurrent workflow instances with the same tag from running. 
This mitigates risk of concurrent instances started due to retries.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", -"location": "query", -"type": "string" } }, -"path": "v1/{+parent}/workflowTemplates:instantiateInline", +"path": "v1/{+name}:write", "request": { -"$ref": "WorkflowTemplate" +"$ref": "WriteSparkApplicationContextRequest" }, "response": { -"$ref": "Operation" +"$ref": "WriteSparkApplicationContextResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +} +} +} +} }, -"list": { -"description": "Lists workflows that match the specified filter in the request.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates", -"httpMethod": "GET", -"id": "dataproc.projects.locations.workflowTemplates.list", +"operations": { +"methods": { +"cancel": { +"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel", +"httpMethod": "POST", +"id": "dataproc.projects.locations.operations.cancel", "parameterOrder": [ -"parent" +"name" ], "parameters": { -"pageSize": { -"description": "Optional. 
The maximum number of results to return in each response.", -"format": "int32", -"location": "query", -"type": "integer" -}, -"pageToken": { -"description": "Optional. The page token, returned by a previous call, to request the next page of results.", -"location": "query", -"type": "string" -}, -"parent": { -"description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", +"name": { +"description": "The name of the operation resource to be cancelled.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+parent}/workflowTemplates", +"path": "v1/{+name}:cancel", "response": { -"$ref": "ListWorkflowTemplatesResponse" +"$ref": "Empty" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"setIamPolicy": { -"description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy", -"httpMethod": "POST", -"id": "dataproc.projects.locations.workflowTemplates.setIamPolicy", +"delete": { +"description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. 
If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", +"httpMethod": "DELETE", +"id": "dataproc.projects.locations.operations.delete", "parameterOrder": [ -"resource" +"name" ], "parameters": { -"resource": { -"description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"name": { +"description": "The name of the operation resource to be deleted.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+resource}:setIamPolicy", -"request": { -"$ref": "SetIamPolicyRequest" -}, +"path": "v1/{+name}", "response": { -"$ref": "Policy" +"$ref": "Empty" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"testIamPermissions": { -"description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:testIamPermissions", -"httpMethod": "POST", -"id": "dataproc.projects.locations.workflowTemplates.testIamPermissions", +"get": { +"description": "Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", +"httpMethod": "GET", +"id": "dataproc.projects.locations.operations.get", "parameterOrder": [ -"resource" +"name" ], "parameters": { -"resource": { -"description": "REQUIRED: The resource for which the policy detail is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"name": { +"description": "The name of the operation resource.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+resource}:testIamPermissions", -"request": { -"$ref": "TestIamPermissionsRequest" -}, +"path": "v1/{+name}", "response": { -"$ref": "TestIamPermissionsResponse" +"$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"update": { -"description": "Updates (replaces) workflow template. The updated template must contain version that matches the current server version.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}", -"httpMethod": "PUT", -"id": "dataproc.projects.locations.workflowTemplates.update", +"list": { +"description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", +"httpMethod": "GET", +"id": "dataproc.projects.locations.operations.list", "parameterOrder": [ "name" ], "parameters": { +"filter": { +"description": "The standard list filter.", +"location": "query", +"type": "string" +}, "name": { -"description": "Output only. 
The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", +"description": "The name of the operation's parent resource.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/operations$", "required": true, "type": "string" +}, +"pageSize": { +"description": "The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "The standard list page token.", +"location": "query", +"type": "string" } }, "path": "v1/{+name}", -"request": { -"$ref": "WorkflowTemplate" -}, "response": { -"$ref": "WorkflowTemplate" +"$ref": "ListOperationsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] } } -} -} }, -"regions": { -"resources": { -"autoscalingPolicies": { +"sessionTemplates": { "methods": { "create": { -"description": "Creates new autoscaling policy.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies", +"description": "Create a session template synchronously.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates", "httpMethod": "POST", -"id": "dataproc.projects.regions.autoscalingPolicies.create", +"id": "dataproc.projects.locations.sessionTemplates.create", "parameterOrder": [ "parent" ], "parameters": { "parent": { -"description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.autoscalingPolicies.create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", +"description": "Required. The parent resource where this session template will be created.", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+parent}/autoscalingPolicies", +"path": "v1/{+parent}/sessionTemplates", "request": { -"$ref": "AutoscalingPolicy" +"$ref": "SessionTemplate" }, "response": { -"$ref": "AutoscalingPolicy" +"$ref": "SessionTemplate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "delete": { -"description": "Deletes an autoscaling policy. It is an error to delete an autoscaling policy that is in use by one or more clusters.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}", +"description": "Deletes a session template.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates/{sessionTemplatesId}", "httpMethod": "DELETE", -"id": "dataproc.projects.regions.autoscalingPolicies.delete", +"id": "dataproc.projects.locations.sessionTemplates.delete", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", +"description": "Required. 
The name of the session template resource to delete.", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/sessionTemplates/[^/]+$", "required": true, "type": "string" } @@ -1327,323 +1681,257 @@ ] }, "get": { -"description": "Retrieves autoscaling policy.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}", +"description": "Gets the resource representation for a session template.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates/{sessionTemplatesId}", "httpMethod": "GET", -"id": "dataproc.projects.regions.autoscalingPolicies.get", +"id": "dataproc.projects.locations.sessionTemplates.get", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", +"description": "Required. The name of the session template to retrieve.", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/sessionTemplates/[^/]+$", "required": true, "type": "string" } }, "path": "v1/{+name}", "response": { -"$ref": "AutoscalingPolicy" +"$ref": "SessionTemplate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"getIamPolicy": { -"description": "Gets the access control policy for a resource. 
Returns an empty policy if the resource exists and does not have a policy set.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:getIamPolicy", -"httpMethod": "POST", -"id": "dataproc.projects.regions.autoscalingPolicies.getIamPolicy", +"list": { +"description": "Lists session templates.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessionTemplates.list", "parameterOrder": [ -"resource" +"parent" ], "parameters": { -"resource": { -"description": "REQUIRED: The resource for which the policy is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", -"location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", -"required": true, +"filter": { +"description": "Optional. A filter for the session templates to return in the response. Filters are case sensitive and have the following syntax:field = value AND field = value ...", +"location": "query", "type": "string" -} -}, -"path": "v1/{+resource}:getIamPolicy", -"request": { -"$ref": "GetIamPolicyRequest" }, -"response": { -"$ref": "Policy" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, -"list": { -"description": "Lists autoscaling policies in the project.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies", -"httpMethod": "GET", -"id": "dataproc.projects.regions.autoscalingPolicies.list", -"parameterOrder": [ -"parent" -], -"parameters": { "pageSize": { -"description": "Optional. The maximum number of results to return in each response. Must be less than or equal to 1000. Defaults to 100.", +"description": "Optional. The maximum number of sessions to return in each response. 
The service may return fewer than this value.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "Optional. The page token, returned by a previous call, to request the next page of results.", +"description": "Optional. A page token received from a previous ListSessions call. Provide this token to retrieve the subsequent page.", "location": "query", "type": "string" }, "parent": { -"description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", +"description": "Required. The parent that owns this collection of session templates.", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+parent}/autoscalingPolicies", +"path": "v1/{+parent}/sessionTemplates", "response": { -"$ref": "ListAutoscalingPoliciesResponse" +"$ref": "ListSessionTemplatesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"setIamPolicy": { -"description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:setIamPolicy", -"httpMethod": "POST", -"id": "dataproc.projects.regions.autoscalingPolicies.setIamPolicy", +"patch": { +"description": "Updates the session template synchronously.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessionTemplates/{sessionTemplatesId}", +"httpMethod": "PATCH", +"id": "dataproc.projects.locations.sessionTemplates.patch", "parameterOrder": [ -"resource" +"name" ], "parameters": { -"resource": { -"description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"name": { +"description": "Required. The resource name of the session template.", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/sessionTemplates/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+resource}:setIamPolicy", +"path": "v1/{+name}", "request": { -"$ref": "SetIamPolicyRequest" +"$ref": "SessionTemplate" }, "response": { -"$ref": "Policy" +"$ref": "SessionTemplate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +} +} }, -"testIamPermissions": { -"description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. 
This operation may \"fail open\" without warning.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:testIamPermissions", +"sessions": { +"methods": { +"create": { +"description": "Create an interactive session asynchronously.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions", "httpMethod": "POST", -"id": "dataproc.projects.regions.autoscalingPolicies.testIamPermissions", +"id": "dataproc.projects.locations.sessions.create", "parameterOrder": [ -"resource" +"parent" ], "parameters": { -"resource": { -"description": "REQUIRED: The resource for which the policy detail is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"parent": { +"description": "Required. The parent resource where this session will be created.", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" +}, +"requestId": { +"description": "Optional. A unique ID used to identify the request. If the service receives two CreateSessionRequests (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateSessionRequest)s with the same ID, the second request is ignored, and the first Session is created and stored in the backend.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"location": "query", +"type": "string" +}, +"sessionId": { +"description": "Required. The ID to use for the session, which becomes the final component of the session's resource name.This value must be 4-63 characters. 
Valid characters are /a-z-/.", +"location": "query", +"type": "string" } }, -"path": "v1/{+resource}:testIamPermissions", +"path": "v1/{+parent}/sessions", "request": { -"$ref": "TestIamPermissionsRequest" +"$ref": "Session" }, "response": { -"$ref": "TestIamPermissionsResponse" +"$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"update": { -"description": "Updates (replaces) autoscaling policy.Disabled check for update_mask, because all updates will be full replacements.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}", -"httpMethod": "PUT", -"id": "dataproc.projects.regions.autoscalingPolicies.update", +"delete": { +"description": "Deletes the interactive session resource. If the session is not in terminal state, it is terminated, and then deleted.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}", +"httpMethod": "DELETE", +"id": "dataproc.projects.locations.sessions.delete", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", +"description": "Required. The name of the session resource to delete.", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+$", "required": true, "type": "string" +}, +"requestId": { +"description": "Optional. A unique ID used to identify the request. 
If the service receives two DeleteSessionRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteSessionRequest)s with the same ID, the second request is ignored.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"location": "query", +"type": "string" } }, "path": "v1/{+name}", -"request": { -"$ref": "AutoscalingPolicy" -}, "response": { -"$ref": "AutoscalingPolicy" +"$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] -} -} }, -"clusters": { -"methods": { -"create": { -"description": "Creates a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).", -"flatPath": "v1/projects/{projectId}/regions/{region}/clusters", -"httpMethod": "POST", -"id": "dataproc.projects.regions.clusters.create", +"get": { +"description": "Gets the resource representation for an interactive session.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessions.get", "parameterOrder": [ -"projectId", -"region" +"name" ], "parameters": { -"actionOnFailedPrimaryWorkers": { -"description": "Optional. Failure action when primary worker creation fails.", -"enum": [ -"FAILURE_ACTION_UNSPECIFIED", -"NO_ACTION", -"DELETE" -], -"enumDescriptions": [ -"When FailureAction is unspecified, failure action defaults to NO_ACTION.", -"Take no action on failure to create a cluster resource. NO_ACTION is the default.", -"Delete the failed cluster resource." -], -"location": "query", -"type": "string" -}, -"projectId": { -"description": "Required. 
The ID of the Google Cloud Platform project that the cluster belongs to.", -"location": "path", -"required": true, -"type": "string" -}, -"region": { -"description": "Required. The Dataproc region in which to handle the request.", +"name": { +"description": "Required. The name of the session to retrieve.", "location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+$", "required": true, "type": "string" -}, -"requestId": { -"description": "Optional. A unique ID used to identify the request. If the server receives two CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", -"location": "query", -"type": "string" } }, -"path": "v1/projects/{projectId}/regions/{region}/clusters", -"request": { -"$ref": "Cluster" -}, +"path": "v1/{+name}", "response": { -"$ref": "Operation" +"$ref": "Session" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"delete": { -"description": "Deletes a cluster in a project. 
The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).", -"flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", -"httpMethod": "DELETE", -"id": "dataproc.projects.regions.clusters.delete", +"list": { +"description": "Lists interactive sessions.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessions.list", "parameterOrder": [ -"projectId", -"region", -"clusterName" +"parent" ], "parameters": { -"clusterName": { -"description": "Required. The cluster name.", -"location": "path", -"required": true, -"type": "string" -}, -"clusterUuid": { -"description": "Optional. Specifying the cluster_uuid means the RPC should fail (with error NOT_FOUND) if cluster with specified UUID does not exist.", +"filter": { +"description": "Optional. A filter for the sessions to return in the response.A filter is a logical expression constraining the values of various fields in each session resource. Filters are case sensitive, and may contain multiple clauses combined with logical operators (AND, OR). Supported fields are session_id, session_uuid, state, create_time, and labels.Example: state = ACTIVE and create_time < \"2023-01-01T00:00:00Z\" is a filter for sessions in an ACTIVE state that were created before 2023-01-01. state = ACTIVE and labels.environment=production is a filter for sessions in an ACTIVE state that have a production environment label.See https://google.aip.dev/assets/misc/ebnf-filtering.txt for a detailed description of the filter syntax and a list of supported comparators.", "location": "query", "type": "string" }, -"gracefulTerminationTimeout": { -"description": "Optional. The graceful termination timeout for the deletion of the cluster. 
Indicate the time the request will wait to complete the running jobs on the cluster before its forceful deletion. Default value is 0 indicating that the user has not enabled the graceful termination. Value can be between 60 second and 6 Hours, in case the graceful termination is enabled. (There is no separate flag to check the enabling or disabling of graceful termination, it can be checked by the values in the field).", -"format": "google-duration", +"pageSize": { +"description": "Optional. The maximum number of sessions to return in each response. The service may return fewer than this value.", +"format": "int32", "location": "query", -"type": "string" +"type": "integer" }, -"projectId": { -"description": "Required. The ID of the Google Cloud Platform project that the cluster belongs to.", -"location": "path", -"required": true, +"pageToken": { +"description": "Optional. A page token received from a previous ListSessions call. Provide this token to retrieve the subsequent page.", +"location": "query", "type": "string" }, -"region": { -"description": "Required. The Dataproc region in which to handle the request.", +"parent": { +"description": "Required. The parent, which owns this collection of sessions.", "location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" -}, -"requestId": { -"description": "Optional. A unique ID used to identify the request. If the server receives two DeleteClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). 
The maximum length is 40 characters.", -"location": "query", -"type": "string" } }, -"path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", +"path": "v1/{+parent}/sessions", "response": { -"$ref": "Operation" +"$ref": "ListSessionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"diagnose": { -"description": "Gets cluster diagnostic information. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). After the operation completes, Operation.response contains DiagnoseClusterResults (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).", -"flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:diagnose", +"terminate": { +"description": "Terminates the interactive session.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}:terminate", "httpMethod": "POST", -"id": "dataproc.projects.regions.clusters.diagnose", +"id": "dataproc.projects.locations.sessions.terminate", "parameterOrder": [ -"projectId", -"region", -"clusterName" +"name" ], "parameters": { -"clusterName": { -"description": "Required. The cluster name.", -"location": "path", -"required": true, -"type": "string" -}, -"projectId": { -"description": "Required. The ID of the Google Cloud Platform project that the cluster belongs to.", -"location": "path", -"required": true, -"type": "string" -}, -"region": { -"description": "Required. The Dataproc region in which to handle the request.", +"name": { +"description": "Required. 
The name of the session resource to terminate.", "location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:diagnose", +"path": "v1/{+name}:terminate", "request": { -"$ref": "DiagnoseClusterRequest" +"$ref": "TerminateSessionRequest" }, "response": { "$ref": "Operation" @@ -1651,990 +1939,1073 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +} }, -"get": { -"description": "Gets the resource representation for a cluster in a project.", -"flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", -"httpMethod": "GET", -"id": "dataproc.projects.regions.clusters.get", +"resources": { +"sparkApplications": { +"methods": { +"access": { +"description": "Obtain high level information corresponding to a single Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:access", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessions.sparkApplications.access", "parameterOrder": [ -"projectId", -"region", -"clusterName" +"name" ], "parameters": { -"clusterName": { -"description": "Required. The cluster name.", -"location": "path", -"required": true, -"type": "string" -}, -"projectId": { -"description": "Required. The ID of the Google Cloud Platform project that the cluster belongs to.", +"name": { +"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" }, -"region": { -"description": "Required. The Dataproc region in which to handle the request.", -"location": "path", -"required": true, +"parent": { +"description": "Required. 
Parent (Session) resource reference.", +"location": "query", "type": "string" } }, -"path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", +"path": "v1/{+name}:access", "response": { -"$ref": "Cluster" +"$ref": "AccessSessionSparkApplicationResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"getIamPolicy": { -"description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:getIamPolicy", -"httpMethod": "POST", -"id": "dataproc.projects.regions.clusters.getIamPolicy", +"accessEnvironmentInfo": { +"description": "Obtain environment details for a Spark Application", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:accessEnvironmentInfo", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessions.sparkApplications.accessEnvironmentInfo", "parameterOrder": [ -"resource" +"name" ], "parameters": { -"resource": { -"description": "REQUIRED: The resource for which the policy is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"name": { +"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" -} }, -"path": "v1/{+resource}:getIamPolicy", -"request": { -"$ref": "GetIamPolicyRequest" +"parent": { +"description": "Required. 
Parent (Session) resource reference.", +"location": "query", +"type": "string" +} }, +"path": "v1/{+name}:accessEnvironmentInfo", "response": { -"$ref": "Policy" +"$ref": "AccessSessionSparkApplicationEnvironmentInfoResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"injectCredentials": { -"description": "Inject encrypted credentials into all of the VMs in a cluster.The target cluster must be a personal auth cluster assigned to the user who is issuing the RPC.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:injectCredentials", -"httpMethod": "POST", -"id": "dataproc.projects.regions.clusters.injectCredentials", +"accessJob": { +"description": "Obtain data corresponding to a spark job for a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:accessJob", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessions.sparkApplications.accessJob", "parameterOrder": [ -"project", -"region", -"cluster" +"name" ], "parameters": { -"cluster": { -"description": "Required. The cluster, in the form clusters/.", -"location": "path", -"pattern": "^clusters/[^/]+$", -"required": true, +"jobId": { +"description": "Required. Job ID to fetch data for.", +"format": "int64", +"location": "query", "type": "string" }, -"project": { -"description": "Required. The ID of the Google Cloud Platform project the cluster belongs to, of the form projects/.", +"name": { +"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" }, -"region": { -"description": "Required. 
The region containing the cluster, of the form regions/.", -"location": "path", -"pattern": "^regions/[^/]+$", -"required": true, +"parent": { +"description": "Required. Parent (Session) resource reference.", +"location": "query", "type": "string" } }, -"path": "v1/{+project}/{+region}/{+cluster}:injectCredentials", -"request": { -"$ref": "InjectCredentialsRequest" -}, +"path": "v1/{+name}:accessJob", "response": { -"$ref": "Operation" +"$ref": "AccessSessionSparkApplicationJobResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"list": { -"description": "Lists all regions/{region}/clusters in a project alphabetically.", -"flatPath": "v1/projects/{projectId}/regions/{region}/clusters", +"accessSqlPlan": { +"description": "Obtain Spark Plan Graph for a Spark Application SQL execution. Limits the number of clusters returned as part of the graph to 10000.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:accessSqlPlan", "httpMethod": "GET", -"id": "dataproc.projects.regions.clusters.list", +"id": "dataproc.projects.locations.sessions.sparkApplications.accessSqlPlan", "parameterOrder": [ -"projectId", -"region" +"name" ], "parameters": { -"filter": { -"description": "Optional. A filter constraining the clusters to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is one of status.state, clusterName, or labels.[KEY], and [KEY] is a label key. value can be * to match all values. status.state can be one of the following: ACTIVE, INACTIVE, CREATING, RUNNING, ERROR, DELETING, UPDATING, STOPPING, or STOPPED. ACTIVE contains the CREATING, UPDATING, and RUNNING states. INACTIVE contains the DELETING, ERROR, STOPPING, and STOPPED states. clusterName is the name of the cluster provided at creation time. 
Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND clusterName = mycluster AND labels.env = staging AND labels.starred = *", -"location": "query", -"type": "string" -}, -"pageSize": { -"description": "Optional. The standard List page size.", -"format": "int32", -"location": "query", -"type": "integer" -}, -"pageToken": { -"description": "Optional. The standard List page token.", +"executionId": { +"description": "Required. Execution ID", +"format": "int64", "location": "query", "type": "string" }, -"projectId": { -"description": "Required. The ID of the Google Cloud Platform project that the cluster belongs to.", +"name": { +"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" }, -"region": { -"description": "Required. The Dataproc region in which to handle the request.", -"location": "path", -"required": true, +"parent": { +"description": "Required. Parent (Session) resource reference.", +"location": "query", "type": "string" } }, -"path": "v1/projects/{projectId}/regions/{region}/clusters", +"path": "v1/{+name}:accessSqlPlan", "response": { -"$ref": "ListClustersResponse" +"$ref": "AccessSessionSparkApplicationSqlSparkPlanGraphResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"patch": { -"description": "Updates a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). 
The cluster must be in a RUNNING state or an error is returned.", -"flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", -"httpMethod": "PATCH", -"id": "dataproc.projects.regions.clusters.patch", +"accessSqlQuery": { +"description": "Obtain data corresponding to a particular SQL Query for a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:accessSqlQuery", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessions.sparkApplications.accessSqlQuery", "parameterOrder": [ -"projectId", -"region", -"clusterName" +"name" ], "parameters": { -"clusterName": { -"description": "Required. The cluster name.", -"location": "path", -"required": true, -"type": "string" -}, -"gracefulDecommissionTimeout": { -"description": "Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning allows removing nodes from the cluster without interrupting jobs in progress. Timeout specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum allowed timeout is 1 day. (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Only supported on Dataproc image versions 1.2 and higher.", -"format": "google-duration", +"details": { +"description": "Optional. Lists/ hides details of Spark plan nodes. True is set to list and false to hide.", "location": "query", -"type": "string" +"type": "boolean" }, -"projectId": { -"description": "Required. The ID of the Google Cloud Platform project the cluster belongs to.", -"location": "path", -"required": true, +"executionId": { +"description": "Required. Execution ID", +"format": "int64", +"location": "query", "type": "string" }, -"region": { -"description": "Required. 
The Dataproc region in which to handle the request.", +"name": { +"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" }, -"requestId": { -"description": "Optional. A unique ID used to identify the request. If the server receives two UpdateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"parent": { +"description": "Required. Parent (Session) resource reference.", "location": "query", "type": "string" }, -"updateMask": { -"description": "Required. Specifies the path, relative to Cluster, of the field to update. 
For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as config.worker_config.num_instances, and the PATCH request body would specify the new value, as follows: { \"config\":{ \"workerConfig\":{ \"numInstances\":\"5\" } } } Similarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the PATCH request body would be set as follows: { \"config\":{ \"secondaryWorkerConfig\":{ \"numInstances\":\"5\" } } } *Note:* Currently, only the following fields can be updated: *Mask* *Purpose* *labels* Update labels *config.worker_config.num_instances* Resize primary worker group *config.secondary_worker_config.num_instances* Resize secondary worker group config.autoscaling_config.policy_uri Use, stop using, or change autoscaling policies ", -"format": "google-fieldmask", +"planDescription": { +"description": "Optional. Enables/ disables physical plan description on demand", "location": "query", -"type": "string" +"type": "boolean" } }, -"path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", -"request": { -"$ref": "Cluster" -}, +"path": "v1/{+name}:accessSqlQuery", "response": { -"$ref": "Operation" +"$ref": "AccessSessionSparkApplicationSqlQueryResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"repair": { -"description": "Repairs a cluster.", -"flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:repair", -"httpMethod": "POST", -"id": "dataproc.projects.regions.clusters.repair", +"accessStageAttempt": { +"description": "Obtain data corresponding to a spark stage attempt for a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:accessStageAttempt", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessions.sparkApplications.accessStageAttempt", 
"parameterOrder": [ -"projectId", -"region", -"clusterName" +"name" ], "parameters": { -"clusterName": { -"description": "Required. The cluster name.", +"name": { +"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" }, -"projectId": { -"description": "Required. The ID of the Google Cloud Platform project the cluster belongs to.", -"location": "path", -"required": true, +"parent": { +"description": "Required. Parent (Session) resource reference.", +"location": "query", "type": "string" }, -"region": { -"description": "Required. The Dataproc region in which to handle the request.", -"location": "path", -"required": true, +"stageAttemptId": { +"description": "Required. Stage Attempt ID", +"format": "int32", +"location": "query", +"type": "integer" +}, +"stageId": { +"description": "Required. Stage ID", +"format": "int64", +"location": "query", "type": "string" -} }, -"path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:repair", -"request": { -"$ref": "RepairClusterRequest" +"summaryMetricsMask": { +"description": "Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} }, +"path": "v1/{+name}:accessStageAttempt", "response": { -"$ref": "Operation" +"$ref": "AccessSessionSparkApplicationStageAttemptResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"setIamPolicy": { -"description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:setIamPolicy", -"httpMethod": "POST", -"id": "dataproc.projects.regions.clusters.setIamPolicy", +"accessStageRddGraph": { +"description": "Obtain RDD operation graph for a Spark Application Stage. Limits the number of clusters returned as part of the graph to 10000.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:accessStageRddGraph", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessions.sparkApplications.accessStageRddGraph", "parameterOrder": [ -"resource" +"name" ], "parameters": { -"resource": { -"description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"name": { +"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" -} }, -"path": "v1/{+resource}:setIamPolicy", -"request": { -"$ref": "SetIamPolicyRequest" +"parent": { +"description": "Required. Parent (Session) resource reference.", +"location": "query", +"type": "string" +}, +"stageId": { +"description": "Required. 
Stage ID", +"format": "int64", +"location": "query", +"type": "string" +} }, +"path": "v1/{+name}:accessStageRddGraph", "response": { -"$ref": "Policy" +"$ref": "AccessSessionSparkApplicationStageRddOperationGraphResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"start": { -"description": "Starts a cluster in a project.", -"flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:start", -"httpMethod": "POST", -"id": "dataproc.projects.regions.clusters.start", +"search": { +"description": "Obtain high level information and list of Spark Applications corresponding to a batch", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications:search", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessions.sparkApplications.search", "parameterOrder": [ -"projectId", -"region", -"clusterName" +"parent" ], "parameters": { -"clusterName": { -"description": "Required. The cluster name.", -"location": "path", -"required": true, +"applicationStatus": { +"description": "Optional. Search only applications in the chosen state.", +"enum": [ +"APPLICATION_STATUS_UNSPECIFIED", +"APPLICATION_STATUS_RUNNING", +"APPLICATION_STATUS_COMPLETED" +], +"enumDescriptions": [ +"", +"", +"" +], +"location": "query", "type": "string" }, -"projectId": { -"description": "Required. The ID of the Google Cloud Platform project the cluster belongs to.", -"location": "path", -"required": true, +"maxEndTime": { +"description": "Optional. Latest end timestamp to list.", +"format": "google-datetime", +"location": "query", "type": "string" }, -"region": { -"description": "Required. The Dataproc region in which to handle the request.", -"location": "path", -"required": true, +"maxTime": { +"description": "Optional. 
Latest start timestamp to list.", +"format": "google-datetime", +"location": "query", "type": "string" -} }, -"path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:start", -"request": { -"$ref": "StartClusterRequest" +"minEndTime": { +"description": "Optional. Earliest end timestamp to list.", +"format": "google-datetime", +"location": "query", +"type": "string" +}, +"minTime": { +"description": "Optional. Earliest start timestamp to list.", +"format": "google-datetime", +"location": "query", +"type": "string" +}, +"pageSize": { +"description": "Optional. Maximum number of applications to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A page token received from a previous SearchSessionSparkApplications call. Provide this token to retrieve the subsequent page.", +"location": "query", +"type": "string" }, +"parent": { +"description": "Required. 
The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID\"", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/sparkApplications:search", "response": { -"$ref": "Operation" +"$ref": "SearchSessionSparkApplicationsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"stop": { -"description": "Stops a cluster in a project.", -"flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:stop", -"httpMethod": "POST", -"id": "dataproc.projects.regions.clusters.stop", +"searchExecutorStageSummary": { +"description": "Obtain executor summary with respect to a spark stage attempt.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:searchExecutorStageSummary", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessions.sparkApplications.searchExecutorStageSummary", "parameterOrder": [ -"projectId", -"region", -"clusterName" +"name" ], "parameters": { -"clusterName": { -"description": "Required. The cluster name.", +"name": { +"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" }, -"projectId": { -"description": "Required. The ID of the Google Cloud Platform project the cluster belongs to.", -"location": "path", -"required": true, +"pageSize": { +"description": "Optional. Maximum number of executors to return in each response. The service may return fewer than this. 
The default page size is 10; the maximum page size is 100.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A page token received from a previous SearchSessionSparkApplicationExecutorStageSummary call. Provide this token to retrieve the subsequent page.", +"location": "query", "type": "string" }, -"region": { -"description": "Required. The Dataproc region in which to handle the request.", -"location": "path", -"required": true, +"parent": { +"description": "Required. Parent (Session) resource reference.", +"location": "query", "type": "string" -} }, -"path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:stop", -"request": { -"$ref": "StopClusterRequest" +"stageAttemptId": { +"description": "Required. Stage Attempt ID", +"format": "int32", +"location": "query", +"type": "integer" +}, +"stageId": { +"description": "Required. Stage ID", +"format": "int64", +"location": "query", +"type": "string" +} }, +"path": "v1/{+name}:searchExecutorStageSummary", "response": { -"$ref": "Operation" +"$ref": "SearchSessionSparkApplicationExecutorStageSummaryResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"testIamPermissions": { -"description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. 
This operation may \"fail open\" without warning.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:testIamPermissions", -"httpMethod": "POST", -"id": "dataproc.projects.regions.clusters.testIamPermissions", +"searchExecutors": { +"description": "Obtain data corresponding to executors for a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:searchExecutors", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessions.sparkApplications.searchExecutors", "parameterOrder": [ -"resource" +"name" ], "parameters": { -"resource": { -"description": "REQUIRED: The resource for which the policy detail is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"executorStatus": { +"description": "Optional. Filter to select whether active/ dead or all executors should be selected.", +"enum": [ +"EXECUTOR_STATUS_UNSPECIFIED", +"EXECUTOR_STATUS_ACTIVE", +"EXECUTOR_STATUS_DEAD" +], +"enumDescriptions": [ +"", +"", +"" +], +"location": "query", +"type": "string" +}, +"name": { +"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" -} }, -"path": "v1/{+resource}:testIamPermissions", -"request": { -"$ref": "TestIamPermissionsRequest" +"pageSize": { +"description": "Optional. Maximum number of executors to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", +"format": "int32", +"location": "query", +"type": "integer" }, +"pageToken": { +"description": "Optional. 
A page token received from a previous SearchSessionSparkApplicationExecutors call. Provide this token to retrieve the subsequent page.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. Parent (Session) resource reference.", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}:searchExecutors", "response": { -"$ref": "TestIamPermissionsResponse" +"$ref": "SearchSessionSparkApplicationExecutorsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] -} }, -"resources": { -"nodeGroups": { -"methods": { -"create": { -"description": "Creates a node group in a cluster. The returned Operation.metadata is NodeGroupOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}/nodeGroups", -"httpMethod": "POST", -"id": "dataproc.projects.regions.clusters.nodeGroups.create", +"searchJobs": { +"description": "Obtain list of spark jobs corresponding to a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:searchJobs", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessions.sparkApplications.searchJobs", "parameterOrder": [ -"parent" +"name" ], "parameters": { -"nodeGroupId": { -"description": "Optional. An optional node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.", +"jobStatus": { +"description": "Optional. 
List only jobs in the specific state.", +"enum": [ +"JOB_EXECUTION_STATUS_UNSPECIFIED", +"JOB_EXECUTION_STATUS_RUNNING", +"JOB_EXECUTION_STATUS_SUCCEEDED", +"JOB_EXECUTION_STATUS_FAILED", +"JOB_EXECUTION_STATUS_UNKNOWN" +], +"enumDescriptions": [ +"", +"", +"", +"", +"" +], "location": "query", "type": "string" }, -"parent": { -"description": "Required. The parent resource where this node group will be created. Format: projects/{project}/regions/{region}/clusters/{cluster}", +"name": { +"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" }, -"parentOperationId": { -"description": "Optional. operation id of the parent operation sending the create request", +"pageSize": { +"description": "Optional. Maximum number of jobs to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A page token received from a previous SearchSessionSparkApplicationJobs call. Provide this token to retrieve the subsequent page.", "location": "query", "type": "string" }, -"requestId": { -"description": "Optional. A unique ID used to identify the request. 
If the server receives two CreateNodeGroupRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateNodeGroupRequest) with the same ID, the second request is ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"parent": { +"description": "Required. Parent (Session) resource reference.", "location": "query", "type": "string" } }, -"path": "v1/{+parent}/nodeGroups", -"request": { -"$ref": "NodeGroup" -}, +"path": "v1/{+name}:searchJobs", "response": { -"$ref": "Operation" +"$ref": "SearchSessionSparkApplicationJobsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"get": { -"description": "Gets the resource representation for a node group in a cluster.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}/nodeGroups/{nodeGroupsId}", +"searchSqlQueries": { +"description": "Obtain data corresponding to SQL Queries for a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:searchSqlQueries", "httpMethod": "GET", -"id": "dataproc.projects.regions.clusters.nodeGroups.get", +"id": "dataproc.projects.locations.sessions.sparkApplications.searchSqlQueries", "parameterOrder": [ "name" ], "parameters": { +"details": { +"description": "Optional. Lists/ hides details of Spark plan nodes. True is set to list and false to hide.", +"location": "query", +"type": "boolean" +}, "name": { -"description": "Required. The name of the node group to retrieve. Format: projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}", +"description": "Required. 
The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+/nodeGroups/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" +}, +"pageSize": { +"description": "Optional. Maximum number of queries to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A page token received from a previous SearchSessionSparkApplicationSqlQueries call. Provide this token to retrieve the subsequent page.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. Parent (Session) resource reference.", +"location": "query", +"type": "string" +}, +"planDescription": { +"description": "Optional. 
Enables/ disables physical plan description on demand", +"location": "query", +"type": "boolean" } }, -"path": "v1/{+name}", +"path": "v1/{+name}:searchSqlQueries", "response": { -"$ref": "NodeGroup" +"$ref": "SearchSessionSparkApplicationSqlQueriesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"repair": { -"description": "Repair nodes in a node group.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}/nodeGroups/{nodeGroupsId}:repair", -"httpMethod": "POST", -"id": "dataproc.projects.regions.clusters.nodeGroups.repair", +"searchStageAttemptTasks": { +"description": "Obtain data corresponding to tasks for a spark stage attempt for a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:searchStageAttemptTasks", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessions.sparkApplications.searchStageAttemptTasks", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Required. The name of the node group to resize. Format: projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}", +"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+/nodeGroups/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" -} }, -"path": "v1/{+name}:repair", -"request": { -"$ref": "RepairNodeGroupRequest" +"pageSize": { +"description": "Optional. Maximum number of tasks to return in each response. The service may return fewer than this. 
The default page size is 10; the maximum page size is 100.", +"format": "int32", +"location": "query", +"type": "integer" }, +"pageToken": { +"description": "Optional. A page token received from a previous SearchSessionSparkApplicationStageAttemptTasks call. Provide this token to retrieve the subsequent page.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. Parent (Session) resource reference.", +"location": "query", +"type": "string" +}, +"sortRuntime": { +"description": "Optional. Sort the tasks by runtime.", +"location": "query", +"type": "boolean" +}, +"stageAttemptId": { +"description": "Optional. Stage Attempt ID", +"format": "int32", +"location": "query", +"type": "integer" +}, +"stageId": { +"description": "Optional. Stage ID", +"format": "int64", +"location": "query", +"type": "string" +}, +"taskStatus": { +"description": "Optional. List only tasks in the state.", +"enum": [ +"TASK_STATUS_UNSPECIFIED", +"TASK_STATUS_RUNNING", +"TASK_STATUS_SUCCESS", +"TASK_STATUS_FAILED", +"TASK_STATUS_KILLED", +"TASK_STATUS_PENDING" +], +"enumDescriptions": [ +"", +"", +"", +"", +"", +"" +], +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}:searchStageAttemptTasks", "response": { -"$ref": "Operation" +"$ref": "SearchSessionSparkApplicationStageAttemptTasksResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"resize": { -"description": "Resizes a node group in a cluster. 
The returned Operation.metadata is NodeGroupOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}/nodeGroups/{nodeGroupsId}:resize", -"httpMethod": "POST", -"id": "dataproc.projects.regions.clusters.nodeGroups.resize", +"searchStageAttempts": { +"description": "Obtain data corresponding to a spark stage attempts for a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:searchStageAttempts", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessions.sparkApplications.searchStageAttempts", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Required. The name of the node group to resize. Format: projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}", +"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+/nodeGroups/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" -} }, -"path": "v1/{+name}:resize", -"request": { -"$ref": "ResizeNodeGroupRequest" +"pageSize": { +"description": "Optional. Maximum number of stage attempts (paging based on stage_attempt_id) to return in each response. The service may return fewer than this. The default page size is 10; the maximum page size is 100.", +"format": "int32", +"location": "query", +"type": "integer" }, -"response": { -"$ref": "Operation" +"pageToken": { +"description": "Optional. A page token received from a previous SearchSessionSparkApplicationStageAttempts call. 
Provide this token to retrieve the subsequent page.", +"location": "query", +"type": "string" }, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -} -} -} -} +"parent": { +"description": "Required. Parent (Session) resource reference.", +"location": "query", +"type": "string" }, -"jobs": { -"methods": { -"cancel": { -"description": "Starts a job cancellation request. To access the job resource after cancellation, call regions/{region}/jobs.list (https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or regions/{region}/jobs.get (https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).", -"flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel", -"httpMethod": "POST", -"id": "dataproc.projects.regions.jobs.cancel", -"parameterOrder": [ -"projectId", -"region", -"jobId" -], -"parameters": { -"jobId": { -"description": "Required. The job ID.", -"location": "path", -"required": true, -"type": "string" -}, -"projectId": { -"description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", -"location": "path", -"required": true, +"stageId": { +"description": "Required. Stage ID for which attempts are to be fetched", +"format": "int64", +"location": "query", "type": "string" }, -"region": { -"description": "Required. The Dataproc region in which to handle the request.", -"location": "path", -"required": true, +"summaryMetricsMask": { +"description": "Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. 
Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field", +"format": "google-fieldmask", +"location": "query", "type": "string" } }, -"path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel", -"request": { -"$ref": "CancelJobRequest" -}, +"path": "v1/{+name}:searchStageAttempts", "response": { -"$ref": "Job" +"$ref": "SearchSessionSparkApplicationStageAttemptsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"delete": { -"description": "Deletes the job from the project. If the job is active, the delete fails, and the response returns FAILED_PRECONDITION.", -"flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", -"httpMethod": "DELETE", -"id": "dataproc.projects.regions.jobs.delete", +"searchStages": { +"description": "Obtain data corresponding to stages for a Spark Application.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:searchStages", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessions.sparkApplications.searchStages", "parameterOrder": [ -"projectId", -"region", -"jobId" +"name" ], "parameters": { -"jobId": { -"description": "Required. The job ID.", +"name": { +"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" }, -"projectId": { -"description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", -"location": "path", -"required": true, +"pageSize": { +"description": "Optional. Maximum number of stages (paging based on stage_id) to return in each response. The service may return fewer than this. 
The default page size is 10; the maximum page size is 100.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A page token received from a previous SearchSessionSparkApplicationStages call. Provide this token to retrieve the subsequent page.", +"location": "query", "type": "string" }, -"region": { -"description": "Required. The Dataproc region in which to handle the request.", -"location": "path", -"required": true, +"parent": { +"description": "Required. Parent (Session) resource reference.", +"location": "query", +"type": "string" +}, +"stageStatus": { +"description": "Optional. List only stages in the given state.", +"enum": [ +"STAGE_STATUS_UNSPECIFIED", +"STAGE_STATUS_ACTIVE", +"STAGE_STATUS_COMPLETE", +"STAGE_STATUS_FAILED", +"STAGE_STATUS_PENDING", +"STAGE_STATUS_SKIPPED" +], +"enumDescriptions": [ +"", +"", +"", +"", +"", +"" +], +"location": "query", +"type": "string" +}, +"summaryMetricsMask": { +"description": "Optional. The list of summary metrics fields to include. Empty list will default to skip all summary metrics fields. 
Example, if the response should include TaskQuantileMetrics, the request should have task_quantile_metrics in summary_metrics_mask field", +"format": "google-fieldmask", +"location": "query", "type": "string" } }, -"path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", +"path": "v1/{+name}:searchStages", "response": { -"$ref": "Empty" +"$ref": "SearchSessionSparkApplicationStagesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"get": { -"description": "Gets the resource representation for a job in a project.", -"flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", +"summarizeExecutors": { +"description": "Obtain summary of Executor Summary for a Spark Application", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:summarizeExecutors", "httpMethod": "GET", -"id": "dataproc.projects.regions.jobs.get", +"id": "dataproc.projects.locations.sessions.sparkApplications.summarizeExecutors", "parameterOrder": [ -"projectId", -"region", -"jobId" +"name" ], "parameters": { -"jobId": { -"description": "Required. The job ID.", -"location": "path", -"required": true, -"type": "string" -}, -"projectId": { -"description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", +"name": { +"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" }, -"region": { -"description": "Required. The Dataproc region in which to handle the request.", -"location": "path", -"required": true, +"parent": { +"description": "Required. 
Parent (Session) resource reference.", +"location": "query", "type": "string" } }, -"path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", +"path": "v1/{+name}:summarizeExecutors", "response": { -"$ref": "Job" +"$ref": "SummarizeSessionSparkApplicationExecutorsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"getIamPolicy": { -"description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:getIamPolicy", -"httpMethod": "POST", -"id": "dataproc.projects.regions.jobs.getIamPolicy", +"summarizeJobs": { +"description": "Obtain summary of Jobs for a Spark Application", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:summarizeJobs", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessions.sparkApplications.summarizeJobs", "parameterOrder": [ -"resource" +"name" ], "parameters": { -"resource": { -"description": "REQUIRED: The resource for which the policy is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"name": { +"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/jobs/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" -} }, -"path": "v1/{+resource}:getIamPolicy", -"request": { -"$ref": "GetIamPolicyRequest" +"parent": { +"description": "Required. 
Parent (Session) resource reference.", +"location": "query", +"type": "string" +} }, +"path": "v1/{+name}:summarizeJobs", "response": { -"$ref": "Policy" +"$ref": "SummarizeSessionSparkApplicationJobsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"list": { -"description": "Lists regions/{region}/jobs in a project.", -"flatPath": "v1/projects/{projectId}/regions/{region}/jobs", +"summarizeStageAttemptTasks": { +"description": "Obtain summary of Tasks for a Spark Application Stage Attempt", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:summarizeStageAttemptTasks", "httpMethod": "GET", -"id": "dataproc.projects.regions.jobs.list", +"id": "dataproc.projects.locations.sessions.sparkApplications.summarizeStageAttemptTasks", "parameterOrder": [ -"projectId", -"region" +"name" ], "parameters": { -"clusterName": { -"description": "Optional. If set, the returned jobs list includes only jobs that were submitted to the named cluster.", -"location": "query", -"type": "string" -}, -"filter": { -"description": "Optional. A filter constraining the jobs to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is status.state or labels.[KEY], and [KEY] is a label key. value can be * to match all values. status.state can be either ACTIVE or NON_ACTIVE. Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND labels.env = staging AND labels.starred = *", -"location": "query", +"name": { +"description": "Required. 
The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", +"required": true, "type": "string" }, -"jobStateMatcher": { -"description": "Optional. Specifies enumerated categories of jobs to list. (default = match ALL jobs).If filter is provided, jobStateMatcher will be ignored.", -"enum": [ -"ALL", -"ACTIVE", -"NON_ACTIVE" -], -"enumDescriptions": [ -"Match all jobs, regardless of state.", -"Only match jobs in non-terminal states: PENDING, RUNNING, or CANCEL_PENDING.", -"Only match jobs in terminal states: CANCELLED, DONE, or ERROR." -], +"parent": { +"description": "Required. Parent (Session) resource reference.", "location": "query", "type": "string" }, -"pageSize": { -"description": "Optional. The number of results to return in each response.", +"stageAttemptId": { +"description": "Required. Stage Attempt ID", "format": "int32", "location": "query", "type": "integer" }, -"pageToken": { -"description": "Optional. The page token, returned by a previous call, to request the next page of results.", +"stageId": { +"description": "Required. Stage ID", +"format": "int64", "location": "query", "type": "string" -}, -"projectId": { -"description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", -"location": "path", -"required": true, -"type": "string" -}, -"region": { -"description": "Required. 
The Dataproc region in which to handle the request.", -"location": "path", -"required": true, -"type": "string" } }, -"path": "v1/projects/{projectId}/regions/{region}/jobs", +"path": "v1/{+name}:summarizeStageAttemptTasks", "response": { -"$ref": "ListJobsResponse" +"$ref": "SummarizeSessionSparkApplicationStageAttemptTasksResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"patch": { -"description": "Updates a job in a project.", -"flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", -"httpMethod": "PATCH", -"id": "dataproc.projects.regions.jobs.patch", +"summarizeStages": { +"description": "Obtain summary of Stages for a Spark Application", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:summarizeStages", +"httpMethod": "GET", +"id": "dataproc.projects.locations.sessions.sparkApplications.summarizeStages", "parameterOrder": [ -"projectId", -"region", -"jobId" +"name" ], "parameters": { -"jobId": { -"description": "Required. The job ID.", -"location": "path", -"required": true, -"type": "string" -}, -"projectId": { -"description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", -"location": "path", -"required": true, -"type": "string" -}, -"region": { -"description": "Required. The Dataproc region in which to handle the request.", +"name": { +"description": "Required. The fully qualified name of the session to retrieve in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" }, -"updateMask": { -"description": "Required. Specifies the path, relative to Job, of the field to update. 
For example, to update the labels of a Job the update_mask parameter would be specified as labels, and the PATCH request body would specify the new value. *Note:* Currently, labels is the only field that can be updated.", -"format": "google-fieldmask", +"parent": { +"description": "Required. Parent (Session) resource reference.", "location": "query", "type": "string" } }, -"path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", -"request": { -"$ref": "Job" -}, +"path": "v1/{+name}:summarizeStages", "response": { -"$ref": "Job" +"$ref": "SummarizeSessionSparkApplicationStagesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"setIamPolicy": { -"description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:setIamPolicy", +"write": { +"description": "Write wrapper objects from dataplane to spanner", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}/sparkApplications/{sparkApplicationsId}:write", "httpMethod": "POST", -"id": "dataproc.projects.regions.jobs.setIamPolicy", +"id": "dataproc.projects.locations.sessions.sparkApplications.write", "parameterOrder": [ -"resource" +"name" ], "parameters": { -"resource": { -"description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"name": { +"description": "Required. 
The fully qualified name of the spark application to write data about in the format \"projects/PROJECT_ID/locations/DATAPROC_REGION/sessions/SESSION_ID/sparkApplications/APPLICATION_ID\"", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/jobs/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/sessions/[^/]+/sparkApplications/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+resource}:setIamPolicy", +"path": "v1/{+name}:write", "request": { -"$ref": "SetIamPolicyRequest" +"$ref": "WriteSessionSparkApplicationContextRequest" }, "response": { -"$ref": "Policy" +"$ref": "WriteSessionSparkApplicationContextResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +} +} +} +} }, -"submit": { -"description": "Submits a job to a cluster.", -"flatPath": "v1/projects/{projectId}/regions/{region}/jobs:submit", +"workflowTemplates": { +"methods": { +"create": { +"description": "Creates new workflow template.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates", "httpMethod": "POST", -"id": "dataproc.projects.regions.jobs.submit", +"id": "dataproc.projects.locations.workflowTemplates.create", "parameterOrder": [ -"projectId", -"region" +"parent" ], "parameters": { -"projectId": { -"description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", -"location": "path", -"required": true, -"type": "string" -}, -"region": { -"description": "Required. The Dataproc region in which to handle the request.", +"parent": { +"description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/projects/{projectId}/regions/{region}/jobs:submit", +"path": "v1/{+parent}/workflowTemplates", "request": { -"$ref": "SubmitJobRequest" +"$ref": "WorkflowTemplate" }, "response": { -"$ref": "Job" +"$ref": "WorkflowTemplate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"submitAsOperation": { -"description": "Submits job to a cluster.", -"flatPath": "v1/projects/{projectId}/regions/{region}/jobs:submitAsOperation", -"httpMethod": "POST", -"id": "dataproc.projects.regions.jobs.submitAsOperation", +"delete": { +"description": "Deletes a workflow template. It does not cancel in-progress workflows.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}", +"httpMethod": "DELETE", +"id": "dataproc.projects.locations.workflowTemplates.delete", "parameterOrder": [ -"projectId", -"region" +"name" ], "parameters": { -"projectId": { -"description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", -"location": "path", -"required": true, -"type": "string" -}, -"region": { -"description": "Required. The Dataproc region in which to handle the request.", +"name": { +"description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, "type": "string" -} -}, -"path": "v1/projects/{projectId}/regions/{region}/jobs:submitAsOperation", -"request": { -"$ref": "SubmitJobRequest" -}, -"response": { -"$ref": "Operation" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] }, -"testIamPermissions": { -"description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:testIamPermissions", -"httpMethod": "POST", -"id": "dataproc.projects.regions.jobs.testIamPermissions", -"parameterOrder": [ -"resource" -], -"parameters": { -"resource": { -"description": "REQUIRED: The resource for which the policy detail is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", -"location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/jobs/[^/]+$", -"required": true, -"type": "string" +"version": { +"description": "Optional. The version of workflow template to delete. 
If specified, will only delete the template if the current server version matches specified version.", +"format": "int32", +"location": "query", +"type": "integer" } }, -"path": "v1/{+resource}:testIamPermissions", -"request": { -"$ref": "TestIamPermissionsRequest" -}, +"path": "v1/{+name}", "response": { -"$ref": "TestIamPermissionsResponse" +"$ref": "Empty" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] -} -} }, -"operations": { -"methods": { -"cancel": { -"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:cancel", -"httpMethod": "POST", -"id": "dataproc.projects.regions.operations.cancel", +"get": { +"description": "Retrieves the latest workflow template.Can retrieve previously instantiated template by specifying optional version parameter.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}", +"httpMethod": "GET", +"id": "dataproc.projects.locations.workflowTemplates.get", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "The name of the operation resource to be cancelled.", +"description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, "type": "string" +}, +"version": { +"description": "Optional. The version of workflow template to retrieve. Only previously instantiated versions can be retrieved.If unspecified, retrieves the current version.", +"format": "int32", +"location": "query", +"type": "integer" } }, -"path": "v1/{+name}:cancel", +"path": "v1/{+name}", "response": { -"$ref": "Empty" +"$ref": "WorkflowTemplate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"delete": { -"description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}", -"httpMethod": "DELETE", -"id": "dataproc.projects.regions.operations.delete", +"getIamPolicy": { +"description": "Gets the access control policy for a resource. 
Returns an empty policy if the resource exists and does not have a policy set.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:getIamPolicy", +"httpMethod": "POST", +"id": "dataproc.projects.locations.workflowTemplates.getIamPolicy", "parameterOrder": [ -"name" +"resource" ], "parameters": { -"name": { -"description": "The name of the operation resource to be deleted.", +"resource": { +"description": "REQUIRED: The resource for which the policy is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+name}", +"path": "v1/{+resource}:getIamPolicy", +"request": { +"$ref": "GetIamPolicyRequest" +}, "response": { -"$ref": "Empty" +"$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"get": { -"description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}", -"httpMethod": "GET", -"id": "dataproc.projects.regions.operations.get", +"instantiate": { +"description": "Instantiates a template and begins execution.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). 
Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:instantiate", +"httpMethod": "POST", +"id": "dataproc.projects.locations.workflowTemplates.instantiate", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "The name of the operation resource.", +"description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+name}", +"path": "v1/{+name}:instantiate", +"request": { +"$ref": "InstantiateWorkflowTemplateRequest" +}, "response": { "$ref": "Operation" }, @@ -2642,70 +3013,70 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, -"getIamPolicy": { -"description": "Gets the access control policy for a resource. 
Returns an empty policy if the resource exists and does not have a policy set.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:getIamPolicy", +"instantiateInline": { +"description": "Instantiates a template and begins execution.This method is equivalent to executing the sequence CreateWorkflowTemplate, InstantiateWorkflowTemplate, DeleteWorkflowTemplate.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates:instantiateInline", "httpMethod": "POST", -"id": "dataproc.projects.regions.operations.getIamPolicy", +"id": "dataproc.projects.locations.workflowTemplates.instantiateInline", "parameterOrder": [ -"resource" +"parent" ], "parameters": { -"resource": { -"description": "REQUIRED: The resource for which the policy is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"parent": { +"description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+$", "required": true, "type": "string" +}, +"requestId": { +"description": "Optional. A tag that prevents multiple concurrent workflow instances with the same tag from running. This mitigates risk of concurrent instances started due to retries.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"location": "query", +"type": "string" } }, -"path": "v1/{+resource}:getIamPolicy", +"path": "v1/{+parent}/workflowTemplates:instantiateInline", "request": { -"$ref": "GetIamPolicyRequest" +"$ref": "WorkflowTemplate" }, "response": { -"$ref": "Policy" +"$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "list": { -"description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns UNIMPLEMENTED.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations", +"description": "Lists workflows that match the specified filter in the request.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates", "httpMethod": "GET", -"id": "dataproc.projects.regions.operations.list", +"id": "dataproc.projects.locations.workflowTemplates.list", "parameterOrder": [ -"name" +"parent" ], "parameters": { -"filter": { -"description": "The standard list filter.", -"location": "query", -"type": "string" -}, -"name": { -"description": "The name of the operation's parent resource.", -"location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/operations$", -"required": true, -"type": "string" -}, "pageSize": { -"description": "The standard list page size.", +"description": "Optional. The maximum number of results to return in each response.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "The standard list page token.", +"description": "Optional. The page token, returned by a previous call, to request the next page of results.", "location": "query", "type": "string" +}, +"parent": { +"description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" } }, -"path": "v1/{+name}", +"path": "v1/{+parent}/workflowTemplates", "response": { -"$ref": "ListOperationsResponse" +"$ref": "ListWorkflowTemplatesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" @@ -2713,9 +3084,9 @@ }, "setIamPolicy": { "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:setIamPolicy", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy", "httpMethod": "POST", -"id": "dataproc.projects.regions.operations.setIamPolicy", +"id": "dataproc.projects.locations.workflowTemplates.setIamPolicy", "parameterOrder": [ "resource" ], @@ -2723,7 +3094,7 @@ "resource": { "description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, "type": "string" } @@ -2741,9 +3112,9 @@ }, "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource. 
If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:testIamPermissions", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:testIamPermissions", "httpMethod": "POST", -"id": "dataproc.projects.regions.operations.testIamPermissions", +"id": "dataproc.projects.locations.workflowTemplates.testIamPermissions", "parameterOrder": [ "resource" ], @@ -2751,7 +3122,7 @@ "resource": { "description": "REQUIRED: The resource for which the policy detail is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, "type": "string" } @@ -2766,29 +3137,25 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] -} -} }, -"workflowTemplates": { -"methods": { -"create": { -"description": "Creates new workflow template.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates", -"httpMethod": "POST", -"id": "dataproc.projects.regions.workflowTemplates.create", +"update": { +"description": "Updates (replaces) workflow template. The updated template must contain version that matches the current server version.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}", +"httpMethod": "PUT", +"id": "dataproc.projects.locations.workflowTemplates.update", "parameterOrder": [ -"parent" +"name" ], "parameters": { -"parent": { -"description": "Required. 
The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", +"name": { +"description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/workflowTemplates/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+parent}/workflowTemplates", +"path": "v1/{+name}", "request": { "$ref": "WorkflowTemplate" }, @@ -2798,169 +3165,132 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +} +} +} +} }, -"delete": { -"description": "Deletes a workflow template. It does not cancel in-progress workflows.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}", -"httpMethod": "DELETE", -"id": "dataproc.projects.regions.workflowTemplates.delete", +"regions": { +"resources": { +"autoscalingPolicies": { +"methods": { +"create": { +"description": "Creates new autoscaling policy.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies", +"httpMethod": "POST", +"id": "dataproc.projects.regions.autoscalingPolicies.create", "parameterOrder": [ -"name" +"parent" ], "parameters": { -"name": { -"description": "Required. 
The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", +"parent": { +"description": "Required. The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", +"pattern": "^projects/[^/]+/regions/[^/]+$", "required": true, "type": "string" -}, -"version": { -"description": "Optional. The version of workflow template to delete. 
If specified, will only delete the template if the current server version matches specified version.", -"format": "int32", -"location": "query", -"type": "integer" } }, -"path": "v1/{+name}", +"path": "v1/{+parent}/autoscalingPolicies", +"request": { +"$ref": "AutoscalingPolicy" +}, "response": { -"$ref": "Empty" +"$ref": "AutoscalingPolicy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"get": { -"description": "Retrieves the latest workflow template.Can retrieve previously instantiated template by specifying optional version parameter.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}", -"httpMethod": "GET", -"id": "dataproc.projects.regions.workflowTemplates.get", +"delete": { +"description": "Deletes an autoscaling policy. It is an error to delete an autoscaling policy that is in use by one or more clusters.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}", +"httpMethod": "DELETE", +"id": "dataproc.projects.regions.autoscalingPolicies.delete", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", +"description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.delete, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", +"pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, "type": "string" -}, -"version": { -"description": "Optional. The version of workflow template to retrieve. Only previously instantiated versions can be retrieved.If unspecified, retrieves the current version.", -"format": "int32", -"location": "query", -"type": "integer" } }, "path": "v1/{+name}", "response": { -"$ref": "WorkflowTemplate" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, -"getIamPolicy": { -"description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:getIamPolicy", -"httpMethod": "POST", -"id": "dataproc.projects.regions.workflowTemplates.getIamPolicy", -"parameterOrder": [ -"resource" -], -"parameters": { -"resource": { -"description": "REQUIRED: The resource for which the policy is being requested. 
See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", -"location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", -"required": true, -"type": "string" -} -}, -"path": "v1/{+resource}:getIamPolicy", -"request": { -"$ref": "GetIamPolicyRequest" -}, -"response": { -"$ref": "Policy" +"$ref": "Empty" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"instantiate": { -"description": "Instantiates a template and begins execution.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:instantiate", -"httpMethod": "POST", -"id": "dataproc.projects.regions.workflowTemplates.instantiate", +"get": { +"description": "Retrieves autoscaling policy.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}", +"httpMethod": "GET", +"id": "dataproc.projects.regions.autoscalingPolicies.get", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", +"description": "Required. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies.get, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", +"pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+name}:instantiate", -"request": { -"$ref": "InstantiateWorkflowTemplateRequest" -}, +"path": "v1/{+name}", "response": { -"$ref": "Operation" +"$ref": "AutoscalingPolicy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, -"instantiateInline": { -"description": "Instantiates a template and begins execution.This method is equivalent to executing the sequence CreateWorkflowTemplate, InstantiateWorkflowTemplate, DeleteWorkflowTemplate.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). 
Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates:instantiateInline", +"getIamPolicy": { +"description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:getIamPolicy", "httpMethod": "POST", -"id": "dataproc.projects.regions.workflowTemplates.instantiateInline", +"id": "dataproc.projects.regions.autoscalingPolicies.getIamPolicy", "parameterOrder": [ -"parent" +"resource" ], "parameters": { -"parent": { -"description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,instantiateinline, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.instantiateinline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", +"resource": { +"description": "REQUIRED: The resource for which the policy is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+$", +"pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, "type": "string" -}, -"requestId": { -"description": "Optional. A tag that prevents multiple concurrent workflow instances with the same tag from running. 
This mitigates risk of concurrent instances started due to retries.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", -"location": "query", -"type": "string" } }, -"path": "v1/{+parent}/workflowTemplates:instantiateInline", +"path": "v1/{+resource}:getIamPolicy", "request": { -"$ref": "WorkflowTemplate" +"$ref": "GetIamPolicyRequest" }, "response": { -"$ref": "Operation" +"$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "list": { -"description": "Lists workflows that match the specified filter in the request.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates", +"description": "Lists autoscaling policies in the project.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies", "httpMethod": "GET", -"id": "dataproc.projects.regions.workflowTemplates.list", +"id": "dataproc.projects.regions.autoscalingPolicies.list", "parameterOrder": [ "parent" ], "parameters": { "pageSize": { -"description": "Optional. The maximum number of results to return in each response.", +"description": "Optional. The maximum number of results to return in each response. Must be less than or equal to 1000. Defaults to 100.", "format": "int32", "location": "query", "type": "integer" @@ -2971,16 +3301,16 @@ "type": "string" }, "parent": { -"description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates,list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", +"description": "Required. 
The \"resource name\" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies.list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.autoscalingPolicies.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", "location": "path", "pattern": "^projects/[^/]+/regions/[^/]+$", "required": true, "type": "string" } }, -"path": "v1/{+parent}/workflowTemplates", +"path": "v1/{+parent}/autoscalingPolicies", "response": { -"$ref": "ListWorkflowTemplatesResponse" +"$ref": "ListAutoscalingPoliciesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" @@ -2988,9 +3318,9 @@ }, "setIamPolicy": { "description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:setIamPolicy", "httpMethod": "POST", -"id": "dataproc.projects.regions.workflowTemplates.setIamPolicy", +"id": "dataproc.projects.regions.autoscalingPolicies.setIamPolicy", "parameterOrder": [ "resource" ], @@ -2998,7 +3328,7 @@ "resource": { "description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", +"pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, "type": "string" } @@ -3016,9 +3346,9 @@ }, "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource. 
If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:testIamPermissions", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:testIamPermissions", "httpMethod": "POST", -"id": "dataproc.projects.regions.workflowTemplates.testIamPermissions", +"id": "dataproc.projects.regions.autoscalingPolicies.testIamPermissions", "parameterOrder": [ "resource" ], @@ -3026,7 +3356,7 @@ "resource": { "description": "REQUIRED: The resource for which the policy detail is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", +"pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, "type": "string" } @@ -3043,679 +3373,5152 @@ ] }, "update": { -"description": "Updates (replaces) workflow template. The updated template must contain version that matches the current server version.", -"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}", +"description": "Updates (replaces) autoscaling policy.Disabled check for update_mask, because all updates will be full replacements.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}", "httpMethod": "PUT", -"id": "dataproc.projects.regions.workflowTemplates.update", +"id": "dataproc.projects.regions.autoscalingPolicies.update", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Output only. 
The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", +"description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", "location": "path", -"pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", +"pattern": "^projects/[^/]+/regions/[^/]+/autoscalingPolicies/[^/]+$", "required": true, "type": "string" } }, "path": "v1/{+name}", "request": { -"$ref": "WorkflowTemplate" +"$ref": "AutoscalingPolicy" }, "response": { -"$ref": "WorkflowTemplate" +"$ref": "AutoscalingPolicy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] } } -} -} -} -} -} -}, -"revision": "20240821", -"rootUrl": "https://dataproc.googleapis.com/", -"schemas": { -"AcceleratorConfig": { -"description": "Specifies the type and number of accelerator cards attached to the instances of an instance. 
See GPUs on Compute Engine (https://cloud.google.com/compute/docs/gpus/).", -"id": "AcceleratorConfig", -"properties": { -"acceleratorCount": { -"description": "The number of the accelerator cards of this type exposed to this instance.", -"format": "int32", -"type": "integer" -}, -"acceleratorTypeUri": { -"description": "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4.", -"type": "string" -} -}, -"type": "object" -}, -"AnalyzeBatchRequest": { -"description": "A request to analyze a batch workload.", -"id": "AnalyzeBatchRequest", -"properties": { -"requestId": { -"description": "Optional. A unique ID used to identify the request. If the service receives two AnalyzeBatchRequest (http://cloud/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.AnalyzeBatchRequest)s with the same request_id, the second request is ignored and the Operation that corresponds to the first request created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). 
The maximum length is 40 characters.", -"type": "string" -} -}, -"type": "object" -}, -"AnalyzeOperationMetadata": { -"description": "Metadata describing the Analyze operation.", -"id": "AnalyzeOperationMetadata", -"properties": { -"analyzedWorkloadName": { -"description": "Output only. name of the workload being analyzed.", -"readOnly": true, -"type": "string" }, -"analyzedWorkloadType": { -"description": "Output only. Type of the workload being analyzed.", +"clusters": { +"methods": { +"create": { +"description": "Creates a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).", +"flatPath": "v1/projects/{projectId}/regions/{region}/clusters", +"httpMethod": "POST", +"id": "dataproc.projects.regions.clusters.create", +"parameterOrder": [ +"projectId", +"region" +], +"parameters": { +"actionOnFailedPrimaryWorkers": { +"description": "Optional. Failure action when primary worker creation fails.", "enum": [ -"WORKLOAD_TYPE_UNSPECIFIED", -"BATCH" +"FAILURE_ACTION_UNSPECIFIED", +"NO_ACTION", +"DELETE" ], "enumDescriptions": [ -"Undefined option", -"Serverless batch job" +"When FailureAction is unspecified, failure action defaults to NO_ACTION.", +"Take no action on failure to create a cluster resource. NO_ACTION is the default.", +"Delete the failed cluster resource." ], -"readOnly": true, +"location": "query", "type": "string" }, -"analyzedWorkloadUuid": { -"description": "Output only. unique identifier of the workload typically generated by control plane. E.g. batch uuid.", -"readOnly": true, +"projectId": { +"description": "Required. The ID of the Google Cloud Platform project that the cluster belongs to.", +"location": "path", +"required": true, "type": "string" }, -"createTime": { -"description": "Output only. 
The time when the operation was created.", -"format": "google-datetime", -"readOnly": true, +"region": { +"description": "Required. The Dataproc region in which to handle the request.", +"location": "path", +"required": true, "type": "string" }, -"description": { -"description": "Output only. Short description of the operation.", -"readOnly": true, +"requestId": { +"description": "Optional. A unique ID used to identify the request. If the server receives two CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"location": "query", "type": "string" +} }, -"doneTime": { -"description": "Output only. The time when the operation finished.", -"format": "google-datetime", -"readOnly": true, +"path": "v1/projects/{projectId}/regions/{region}/clusters", +"request": { +"$ref": "Cluster" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).", +"flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", +"httpMethod": "DELETE", +"id": "dataproc.projects.regions.clusters.delete", +"parameterOrder": [ +"projectId", +"region", +"clusterName" +], +"parameters": { +"clusterName": { +"description": "Required. 
The cluster name.", +"location": "path", +"required": true, +"type": "string" +}, +"clusterUuid": { +"description": "Optional. Specifying the cluster_uuid means the RPC should fail (with error NOT_FOUND) if cluster with specified UUID does not exist.", +"location": "query", +"type": "string" +}, +"gracefulTerminationTimeout": { +"description": "Optional. The graceful termination timeout for the deletion of the cluster. Indicate the time the request will wait to complete the running jobs on the cluster before its forceful deletion. Default value is 0 indicating that the user has not enabled the graceful termination. Value can be between 60 second and 6 Hours, in case the graceful termination is enabled. (There is no separate flag to check the enabling or disabling of graceful termination, it can be checked by the values in the field).", +"format": "google-duration", +"location": "query", +"type": "string" +}, +"projectId": { +"description": "Required. The ID of the Google Cloud Platform project that the cluster belongs to.", +"location": "path", +"required": true, +"type": "string" +}, +"region": { +"description": "Required. The Dataproc region in which to handle the request.", +"location": "path", +"required": true, +"type": "string" +}, +"requestId": { +"description": "Optional. A unique ID used to identify the request. If the server receives two DeleteClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). 
The maximum length is 40 characters.", +"location": "query", +"type": "string" +} +}, +"path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"diagnose": { +"description": "Gets cluster diagnostic information. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). After the operation completes, Operation.response contains DiagnoseClusterResults (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).", +"flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:diagnose", +"httpMethod": "POST", +"id": "dataproc.projects.regions.clusters.diagnose", +"parameterOrder": [ +"projectId", +"region", +"clusterName" +], +"parameters": { +"clusterName": { +"description": "Required. The cluster name.", +"location": "path", +"required": true, +"type": "string" +}, +"projectId": { +"description": "Required. The ID of the Google Cloud Platform project that the cluster belongs to.", +"location": "path", +"required": true, +"type": "string" +}, +"region": { +"description": "Required. 
The Dataproc region in which to handle the request.", +"location": "path", +"required": true, +"type": "string" +} +}, +"path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:diagnose", +"request": { +"$ref": "DiagnoseClusterRequest" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets the resource representation for a cluster in a project.", +"flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", +"httpMethod": "GET", +"id": "dataproc.projects.regions.clusters.get", +"parameterOrder": [ +"projectId", +"region", +"clusterName" +], +"parameters": { +"clusterName": { +"description": "Required. The cluster name.", +"location": "path", +"required": true, +"type": "string" +}, +"projectId": { +"description": "Required. The ID of the Google Cloud Platform project that the cluster belongs to.", +"location": "path", +"required": true, +"type": "string" +}, +"region": { +"description": "Required. The Dataproc region in which to handle the request.", +"location": "path", +"required": true, +"type": "string" +} +}, +"path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", +"response": { +"$ref": "Cluster" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"getIamPolicy": { +"description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:getIamPolicy", +"httpMethod": "POST", +"id": "dataproc.projects.regions.clusters.getIamPolicy", +"parameterOrder": [ +"resource" +], +"parameters": { +"resource": { +"description": "REQUIRED: The resource for which the policy is being requested. 
See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+resource}:getIamPolicy", +"request": { +"$ref": "GetIamPolicyRequest" +}, +"response": { +"$ref": "Policy" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"injectCredentials": { +"description": "Inject encrypted credentials into all of the VMs in a cluster.The target cluster must be a personal auth cluster assigned to the user who is issuing the RPC.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:injectCredentials", +"httpMethod": "POST", +"id": "dataproc.projects.regions.clusters.injectCredentials", +"parameterOrder": [ +"project", +"region", +"cluster" +], +"parameters": { +"cluster": { +"description": "Required. The cluster, in the form clusters/.", +"location": "path", +"pattern": "^clusters/[^/]+$", +"required": true, +"type": "string" +}, +"project": { +"description": "Required. The ID of the Google Cloud Platform project the cluster belongs to, of the form projects/.", +"location": "path", +"pattern": "^projects/[^/]+$", +"required": true, +"type": "string" +}, +"region": { +"description": "Required. 
The region containing the cluster, of the form regions/.", +"location": "path", +"pattern": "^regions/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+project}/{+region}/{+cluster}:injectCredentials", +"request": { +"$ref": "InjectCredentialsRequest" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists all regions/{region}/clusters in a project alphabetically.", +"flatPath": "v1/projects/{projectId}/regions/{region}/clusters", +"httpMethod": "GET", +"id": "dataproc.projects.regions.clusters.list", +"parameterOrder": [ +"projectId", +"region" +], +"parameters": { +"filter": { +"description": "Optional. A filter constraining the clusters to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is one of status.state, clusterName, or labels.[KEY], and [KEY] is a label key. value can be * to match all values. status.state can be one of the following: ACTIVE, INACTIVE, CREATING, RUNNING, ERROR, DELETING, UPDATING, STOPPING, or STOPPED. ACTIVE contains the CREATING, UPDATING, and RUNNING states. INACTIVE contains the DELETING, ERROR, STOPPING, and STOPPED states. clusterName is the name of the cluster provided at creation time. Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND clusterName = mycluster AND labels.env = staging AND labels.starred = *", +"location": "query", +"type": "string" +}, +"pageSize": { +"description": "Optional. The standard List page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. The standard List page token.", +"location": "query", +"type": "string" +}, +"projectId": { +"description": "Required. 
The ID of the Google Cloud Platform project that the cluster belongs to.", +"location": "path", +"required": true, +"type": "string" +}, +"region": { +"description": "Required. The Dataproc region in which to handle the request.", +"location": "path", +"required": true, +"type": "string" +} +}, +"path": "v1/projects/{projectId}/regions/{region}/clusters", +"response": { +"$ref": "ListClustersResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"patch": { +"description": "Updates a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). The cluster must be in a RUNNING state or an error is returned.", +"flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", +"httpMethod": "PATCH", +"id": "dataproc.projects.regions.clusters.patch", +"parameterOrder": [ +"projectId", +"region", +"clusterName" +], +"parameters": { +"clusterName": { +"description": "Required. The cluster name.", +"location": "path", +"required": true, +"type": "string" +}, +"gracefulDecommissionTimeout": { +"description": "Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning allows removing nodes from the cluster without interrupting jobs in progress. Timeout specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum allowed timeout is 1 day. (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Only supported on Dataproc image versions 1.2 and higher.", +"format": "google-duration", +"location": "query", +"type": "string" +}, +"projectId": { +"description": "Required. 
The ID of the Google Cloud Platform project the cluster belongs to.", +"location": "path", +"required": true, +"type": "string" +}, +"region": { +"description": "Required. The Dataproc region in which to handle the request.", +"location": "path", +"required": true, +"type": "string" +}, +"requestId": { +"description": "Optional. A unique ID used to identify the request. If the server receives two UpdateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"location": "query", +"type": "string" +}, +"updateMask": { +"description": "Required. Specifies the path, relative to Cluster, of the field to update. 
For example, to change the number of workers in a cluster to 5, the update_mask parameter would be specified as config.worker_config.num_instances, and the PATCH request body would specify the new value, as follows: { \"config\":{ \"workerConfig\":{ \"numInstances\":\"5\" } } } Similarly, to change the number of preemptible workers in a cluster to 5, the update_mask parameter would be config.secondary_worker_config.num_instances, and the PATCH request body would be set as follows: { \"config\":{ \"secondaryWorkerConfig\":{ \"numInstances\":\"5\" } } } *Note:* Currently, only the following fields can be updated: *Mask* *Purpose* *labels* Update labels *config.worker_config.num_instances* Resize primary worker group *config.secondary_worker_config.num_instances* Resize secondary worker group config.autoscaling_config.policy_uri Use, stop using, or change autoscaling policies ", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}", +"request": { +"$ref": "Cluster" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"repair": { +"description": "Repairs a cluster.", +"flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:repair", +"httpMethod": "POST", +"id": "dataproc.projects.regions.clusters.repair", +"parameterOrder": [ +"projectId", +"region", +"clusterName" +], +"parameters": { +"clusterName": { +"description": "Required. The cluster name.", +"location": "path", +"required": true, +"type": "string" +}, +"projectId": { +"description": "Required. The ID of the Google Cloud Platform project the cluster belongs to.", +"location": "path", +"required": true, +"type": "string" +}, +"region": { +"description": "Required. 
The Dataproc region in which to handle the request.", +"location": "path", +"required": true, +"type": "string" +} +}, +"path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:repair", +"request": { +"$ref": "RepairClusterRequest" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"setIamPolicy": { +"description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:setIamPolicy", +"httpMethod": "POST", +"id": "dataproc.projects.regions.clusters.setIamPolicy", +"parameterOrder": [ +"resource" +], +"parameters": { +"resource": { +"description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+resource}:setIamPolicy", +"request": { +"$ref": "SetIamPolicyRequest" +}, +"response": { +"$ref": "Policy" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"start": { +"description": "Starts a cluster in a project.", +"flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:start", +"httpMethod": "POST", +"id": "dataproc.projects.regions.clusters.start", +"parameterOrder": [ +"projectId", +"region", +"clusterName" +], +"parameters": { +"clusterName": { +"description": "Required. The cluster name.", +"location": "path", +"required": true, +"type": "string" +}, +"projectId": { +"description": "Required. The ID of the Google Cloud Platform project the cluster belongs to.", +"location": "path", +"required": true, +"type": "string" +}, +"region": { +"description": "Required. 
The Dataproc region in which to handle the request.", +"location": "path", +"required": true, +"type": "string" +} +}, +"path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:start", +"request": { +"$ref": "StartClusterRequest" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"stop": { +"description": "Stops a cluster in a project.", +"flatPath": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:stop", +"httpMethod": "POST", +"id": "dataproc.projects.regions.clusters.stop", +"parameterOrder": [ +"projectId", +"region", +"clusterName" +], +"parameters": { +"clusterName": { +"description": "Required. The cluster name.", +"location": "path", +"required": true, +"type": "string" +}, +"projectId": { +"description": "Required. The ID of the Google Cloud Platform project the cluster belongs to.", +"location": "path", +"required": true, +"type": "string" +}, +"region": { +"description": "Required. The Dataproc region in which to handle the request.", +"location": "path", +"required": true, +"type": "string" +} +}, +"path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:stop", +"request": { +"$ref": "StopClusterRequest" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"testIamPermissions": { +"description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. 
This operation may \"fail open\" without warning.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:testIamPermissions", +"httpMethod": "POST", +"id": "dataproc.projects.regions.clusters.testIamPermissions", +"parameterOrder": [ +"resource" +], +"parameters": { +"resource": { +"description": "REQUIRED: The resource for which the policy detail is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+resource}:testIamPermissions", +"request": { +"$ref": "TestIamPermissionsRequest" +}, +"response": { +"$ref": "TestIamPermissionsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +}, +"resources": { +"nodeGroups": { +"methods": { +"create": { +"description": "Creates a node group in a cluster. The returned Operation.metadata is NodeGroupOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}/nodeGroups", +"httpMethod": "POST", +"id": "dataproc.projects.regions.clusters.nodeGroups.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"nodeGroupId": { +"description": "Optional. An optional node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The parent resource where this node group will be created. 
Format: projects/{project}/regions/{region}/clusters/{cluster}", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+$", +"required": true, +"type": "string" +}, +"parentOperationId": { +"description": "Optional. operation id of the parent operation sending the create request", +"location": "query", +"type": "string" +}, +"requestId": { +"description": "Optional. A unique ID used to identify the request. If the server receives two CreateNodeGroupRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateNodeGroupRequest) with the same ID, the second request is ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+parent}/nodeGroups", +"request": { +"$ref": "NodeGroup" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets the resource representation for a node group in a cluster.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}/nodeGroups/{nodeGroupsId}", +"httpMethod": "GET", +"id": "dataproc.projects.regions.clusters.nodeGroups.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the node group to retrieve. 
Format: projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+/nodeGroups/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "NodeGroup" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"repair": { +"description": "Repair nodes in a node group.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}/nodeGroups/{nodeGroupsId}:repair", +"httpMethod": "POST", +"id": "dataproc.projects.regions.clusters.nodeGroups.repair", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the node group to resize. Format: projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+/nodeGroups/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}:repair", +"request": { +"$ref": "RepairNodeGroupRequest" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"resize": { +"description": "Resizes a node group in a cluster. The returned Operation.metadata is NodeGroupOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}/nodeGroups/{nodeGroupsId}:resize", +"httpMethod": "POST", +"id": "dataproc.projects.regions.clusters.nodeGroups.resize", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the node group to resize. 
Format: projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/clusters/[^/]+/nodeGroups/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}:resize", +"request": { +"$ref": "ResizeNodeGroupRequest" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +} +} +}, +"jobs": { +"methods": { +"cancel": { +"description": "Starts a job cancellation request. To access the job resource after cancellation, call regions/{region}/jobs.list (https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or regions/{region}/jobs.get (https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).", +"flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel", +"httpMethod": "POST", +"id": "dataproc.projects.regions.jobs.cancel", +"parameterOrder": [ +"projectId", +"region", +"jobId" +], +"parameters": { +"jobId": { +"description": "Required. The job ID.", +"location": "path", +"required": true, +"type": "string" +}, +"projectId": { +"description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", +"location": "path", +"required": true, +"type": "string" +}, +"region": { +"description": "Required. The Dataproc region in which to handle the request.", +"location": "path", +"required": true, +"type": "string" +} +}, +"path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel", +"request": { +"$ref": "CancelJobRequest" +}, +"response": { +"$ref": "Job" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes the job from the project. 
If the job is active, the delete fails, and the response returns FAILED_PRECONDITION.", +"flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", +"httpMethod": "DELETE", +"id": "dataproc.projects.regions.jobs.delete", +"parameterOrder": [ +"projectId", +"region", +"jobId" +], +"parameters": { +"jobId": { +"description": "Required. The job ID.", +"location": "path", +"required": true, +"type": "string" +}, +"projectId": { +"description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", +"location": "path", +"required": true, +"type": "string" +}, +"region": { +"description": "Required. The Dataproc region in which to handle the request.", +"location": "path", +"required": true, +"type": "string" +} +}, +"path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", +"response": { +"$ref": "Empty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets the resource representation for a job in a project.", +"flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", +"httpMethod": "GET", +"id": "dataproc.projects.regions.jobs.get", +"parameterOrder": [ +"projectId", +"region", +"jobId" +], +"parameters": { +"jobId": { +"description": "Required. The job ID.", +"location": "path", +"required": true, +"type": "string" +}, +"projectId": { +"description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", +"location": "path", +"required": true, +"type": "string" +}, +"region": { +"description": "Required. The Dataproc region in which to handle the request.", +"location": "path", +"required": true, +"type": "string" +} +}, +"path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", +"response": { +"$ref": "Job" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"getIamPolicy": { +"description": "Gets the access control policy for a resource. 
Returns an empty policy if the resource exists and does not have a policy set.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:getIamPolicy", +"httpMethod": "POST", +"id": "dataproc.projects.regions.jobs.getIamPolicy", +"parameterOrder": [ +"resource" +], +"parameters": { +"resource": { +"description": "REQUIRED: The resource for which the policy is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/jobs/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+resource}:getIamPolicy", +"request": { +"$ref": "GetIamPolicyRequest" +}, +"response": { +"$ref": "Policy" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists regions/{region}/jobs in a project.", +"flatPath": "v1/projects/{projectId}/regions/{region}/jobs", +"httpMethod": "GET", +"id": "dataproc.projects.regions.jobs.list", +"parameterOrder": [ +"projectId", +"region" +], +"parameters": { +"clusterName": { +"description": "Optional. If set, the returned jobs list includes only jobs that were submitted to the named cluster.", +"location": "query", +"type": "string" +}, +"filter": { +"description": "Optional. A filter constraining the jobs to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is status.state or labels.[KEY], and [KEY] is a label key. value can be * to match all values. status.state can be either ACTIVE or NON_ACTIVE. Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND labels.env = staging AND labels.starred = *", +"location": "query", +"type": "string" +}, +"jobStateMatcher": { +"description": "Optional. Specifies enumerated categories of jobs to list. 
(default = match ALL jobs).If filter is provided, jobStateMatcher will be ignored.", +"enum": [ +"ALL", +"ACTIVE", +"NON_ACTIVE" +], +"enumDescriptions": [ +"Match all jobs, regardless of state.", +"Only match jobs in non-terminal states: PENDING, RUNNING, or CANCEL_PENDING.", +"Only match jobs in terminal states: CANCELLED, DONE, or ERROR." +], +"location": "query", +"type": "string" +}, +"pageSize": { +"description": "Optional. The number of results to return in each response.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. The page token, returned by a previous call, to request the next page of results.", +"location": "query", +"type": "string" +}, +"projectId": { +"description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", +"location": "path", +"required": true, +"type": "string" +}, +"region": { +"description": "Required. The Dataproc region in which to handle the request.", +"location": "path", +"required": true, +"type": "string" +} +}, +"path": "v1/projects/{projectId}/regions/{region}/jobs", +"response": { +"$ref": "ListJobsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"patch": { +"description": "Updates a job in a project.", +"flatPath": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", +"httpMethod": "PATCH", +"id": "dataproc.projects.regions.jobs.patch", +"parameterOrder": [ +"projectId", +"region", +"jobId" +], +"parameters": { +"jobId": { +"description": "Required. The job ID.", +"location": "path", +"required": true, +"type": "string" +}, +"projectId": { +"description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", +"location": "path", +"required": true, +"type": "string" +}, +"region": { +"description": "Required. The Dataproc region in which to handle the request.", +"location": "path", +"required": true, +"type": "string" +}, +"updateMask": { +"description": "Required. 
Specifies the path, relative to Job, of the field to update. For example, to update the labels of a Job the update_mask parameter would be specified as labels, and the PATCH request body would specify the new value. *Note:* Currently, labels is the only field that can be updated.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}", +"request": { +"$ref": "Job" +}, +"response": { +"$ref": "Job" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"setIamPolicy": { +"description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:setIamPolicy", +"httpMethod": "POST", +"id": "dataproc.projects.regions.jobs.setIamPolicy", +"parameterOrder": [ +"resource" +], +"parameters": { +"resource": { +"description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/jobs/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+resource}:setIamPolicy", +"request": { +"$ref": "SetIamPolicyRequest" +}, +"response": { +"$ref": "Policy" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"submit": { +"description": "Submits a job to a cluster.", +"flatPath": "v1/projects/{projectId}/regions/{region}/jobs:submit", +"httpMethod": "POST", +"id": "dataproc.projects.regions.jobs.submit", +"parameterOrder": [ +"projectId", +"region" +], +"parameters": { +"projectId": { +"description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", +"location": "path", +"required": true, +"type": "string" +}, +"region": { +"description": "Required. 
The Dataproc region in which to handle the request.", +"location": "path", +"required": true, +"type": "string" +} +}, +"path": "v1/projects/{projectId}/regions/{region}/jobs:submit", +"request": { +"$ref": "SubmitJobRequest" +}, +"response": { +"$ref": "Job" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"submitAsOperation": { +"description": "Submits job to a cluster.", +"flatPath": "v1/projects/{projectId}/regions/{region}/jobs:submitAsOperation", +"httpMethod": "POST", +"id": "dataproc.projects.regions.jobs.submitAsOperation", +"parameterOrder": [ +"projectId", +"region" +], +"parameters": { +"projectId": { +"description": "Required. The ID of the Google Cloud Platform project that the job belongs to.", +"location": "path", +"required": true, +"type": "string" +}, +"region": { +"description": "Required. The Dataproc region in which to handle the request.", +"location": "path", +"required": true, +"type": "string" +} +}, +"path": "v1/projects/{projectId}/regions/{region}/jobs:submitAsOperation", +"request": { +"$ref": "SubmitJobRequest" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"testIamPermissions": { +"description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:testIamPermissions", +"httpMethod": "POST", +"id": "dataproc.projects.regions.jobs.testIamPermissions", +"parameterOrder": [ +"resource" +], +"parameters": { +"resource": { +"description": "REQUIRED: The resource for which the policy detail is being requested. 
See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/jobs/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+resource}:testIamPermissions", +"request": { +"$ref": "TestIamPermissionsRequest" +}, +"response": { +"$ref": "TestIamPermissionsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +}, +"operations": { +"methods": { +"cancel": { +"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:cancel", +"httpMethod": "POST", +"id": "dataproc.projects.regions.operations.cancel", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be cancelled.", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}:cancel", +"response": { +"$ref": "Empty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. 
If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}", +"httpMethod": "DELETE", +"id": "dataproc.projects.regions.operations.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be deleted.", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "Empty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}", +"httpMethod": "GET", +"id": "dataproc.projects.regions.operations.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource.", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"getIamPolicy": { +"description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:getIamPolicy", +"httpMethod": "POST", +"id": "dataproc.projects.regions.operations.getIamPolicy", +"parameterOrder": [ +"resource" +], +"parameters": { +"resource": { +"description": "REQUIRED: The resource for which the policy is being requested. 
See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+resource}:getIamPolicy", +"request": { +"$ref": "GetIamPolicyRequest" +}, +"response": { +"$ref": "Policy" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations", +"httpMethod": "GET", +"id": "dataproc.projects.regions.operations.list", +"parameterOrder": [ +"name" +], +"parameters": { +"filter": { +"description": "The standard list filter.", +"location": "query", +"type": "string" +}, +"name": { +"description": "The name of the operation's parent resource.", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/operations$", +"required": true, +"type": "string" +}, +"pageSize": { +"description": "The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "The standard list page token.", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "ListOperationsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"setIamPolicy": { +"description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:setIamPolicy", +"httpMethod": "POST", +"id": "dataproc.projects.regions.operations.setIamPolicy", +"parameterOrder": [ +"resource" +], +"parameters": { +"resource": { +"description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+resource}:setIamPolicy", +"request": { +"$ref": "SetIamPolicyRequest" +}, +"response": { +"$ref": "Policy" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"testIamPermissions": { +"description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:testIamPermissions", +"httpMethod": "POST", +"id": "dataproc.projects.regions.operations.testIamPermissions", +"parameterOrder": [ +"resource" +], +"parameters": { +"resource": { +"description": "REQUIRED: The resource for which the policy detail is being requested. 
See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+resource}:testIamPermissions", +"request": { +"$ref": "TestIamPermissionsRequest" +}, +"response": { +"$ref": "TestIamPermissionsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +}, +"workflowTemplates": { +"methods": { +"create": { +"description": "Creates new workflow template.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates", +"httpMethod": "POST", +"id": "dataproc.projects.regions.workflowTemplates.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.create, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.create, the resource name of the location has the following format: projects/{project_id}/locations/{location}", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/workflowTemplates", +"request": { +"$ref": "WorkflowTemplate" +}, +"response": { +"$ref": "WorkflowTemplate" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a workflow template. It does not cancel in-progress workflows.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}", +"httpMethod": "DELETE", +"id": "dataproc.projects.regions.workflowTemplates.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. 
The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.workflowTemplates.delete, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", +"required": true, +"type": "string" +}, +"version": { +"description": "Optional. The version of workflow template to delete. If specified, will only delete the template if the current server version matches specified version.", +"format": "int32", +"location": "query", +"type": "integer" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "Empty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Retrieves the latest workflow template.Can retrieve previously instantiated template by specifying optional version parameter.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}", +"httpMethod": "GET", +"id": "dataproc.projects.regions.workflowTemplates.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.get, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", +"required": true, +"type": "string" +}, +"version": { +"description": "Optional. The version of workflow template to retrieve. Only previously instantiated versions can be retrieved.If unspecified, retrieves the current version.", +"format": "int32", +"location": "query", +"type": "integer" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "WorkflowTemplate" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"getIamPolicy": { +"description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:getIamPolicy", +"httpMethod": "POST", +"id": "dataproc.projects.regions.workflowTemplates.getIamPolicy", +"parameterOrder": [ +"resource" +], +"parameters": { +"resource": { +"description": "REQUIRED: The resource for which the policy is being requested. 
See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+resource}:getIamPolicy", +"request": { +"$ref": "GetIamPolicyRequest" +}, +"response": { +"$ref": "Policy" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"instantiate": { +"description": "Instantiates a template and begins execution.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:instantiate", +"httpMethod": "POST", +"id": "dataproc.projects.regions.workflowTemplates.instantiate", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates.instantiate, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}:instantiate", +"request": { +"$ref": "InstantiateWorkflowTemplateRequest" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"instantiateInline": { +"description": "Instantiates a template and begins execution.This method is equivalent to executing the sequence CreateWorkflowTemplate, InstantiateWorkflowTemplate, DeleteWorkflowTemplate.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates:instantiateInline", +"httpMethod": "POST", +"id": "dataproc.projects.regions.workflowTemplates.instantiateInline", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.instantiateInline, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.instantiateInline, the resource name of the location has the following format: projects/{project_id}/locations/{location}", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+$", +"required": true, +"type": "string" +}, +"requestId": { +"description": "Optional. A tag that prevents multiple concurrent workflow instances with the same tag from running. This mitigates risk of concurrent instances started due to retries.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+parent}/workflowTemplates:instantiateInline", +"request": { +"$ref": "WorkflowTemplate" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists workflows that match the specified filter in the request.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates", +"httpMethod": "GET", +"id": "dataproc.projects.regions.workflowTemplates.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"pageSize": { +"description": "Optional. The maximum number of results to return in each response.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. The page token, returned by a previous call, to request the next page of results.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates.list, the resource name of the region has the following format: projects/{project_id}/regions/{region} For projects.locations.workflowTemplates.list, the resource name of the location has the following format: projects/{project_id}/locations/{location}", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/workflowTemplates", +"response": { +"$ref": "ListWorkflowTemplatesResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"setIamPolicy": { +"description": "Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy", +"httpMethod": "POST", +"id": "dataproc.projects.regions.workflowTemplates.setIamPolicy", +"parameterOrder": [ +"resource" +], +"parameters": { +"resource": { +"description": "REQUIRED: The resource for which the policy is being specified. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+resource}:setIamPolicy", +"request": { +"$ref": "SetIamPolicyRequest" +}, +"response": { +"$ref": "Policy" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"testIamPermissions": { +"description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. 
This operation may \"fail open\" without warning.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:testIamPermissions", +"httpMethod": "POST", +"id": "dataproc.projects.regions.workflowTemplates.testIamPermissions", +"parameterOrder": [ +"resource" +], +"parameters": { +"resource": { +"description": "REQUIRED: The resource for which the policy detail is being requested. See Resource names (https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+resource}:testIamPermissions", +"request": { +"$ref": "TestIamPermissionsRequest" +}, +"response": { +"$ref": "TestIamPermissionsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"update": { +"description": "Updates (replaces) workflow template. The updated template must contain version that matches the current server version.", +"flatPath": "v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}", +"httpMethod": "PUT", +"id": "dataproc.projects.regions.workflowTemplates.update", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}", +"location": "path", +"pattern": "^projects/[^/]+/regions/[^/]+/workflowTemplates/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"request": { +"$ref": "WorkflowTemplate" +}, +"response": { +"$ref": "WorkflowTemplate" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +} +} +} +} +} +}, +"revision": "20240928", +"rootUrl": "https://dataproc.googleapis.com/", +"schemas": { +"AcceleratorConfig": { +"description": "Specifies the type and number of accelerator cards attached to the instances of an instance. See GPUs on Compute Engine (https://cloud.google.com/compute/docs/gpus/).", +"id": "AcceleratorConfig", +"properties": { +"acceleratorCount": { +"description": "The number of the accelerator cards of this type exposed to this instance.", +"format": "int32", +"type": "integer" +}, +"acceleratorTypeUri": { +"description": "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. 
See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4 nvidia-tesla-t4Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-t4.", +"type": "string" +} +}, +"type": "object" +}, +"AccessSessionSparkApplicationEnvironmentInfoResponse": { +"description": "Environment details of a Spark Application.", +"id": "AccessSessionSparkApplicationEnvironmentInfoResponse", +"properties": { +"applicationEnvironmentInfo": { +"$ref": "ApplicationEnvironmentInfo", +"description": "Details about the Environment that the application is running in." +} +}, +"type": "object" +}, +"AccessSessionSparkApplicationJobResponse": { +"description": "Details of a particular job associated with Spark Application", +"id": "AccessSessionSparkApplicationJobResponse", +"properties": { +"jobData": { +"$ref": "JobData", +"description": "Output only. Data corresponding to a spark job.", +"readOnly": true +} +}, +"type": "object" +}, +"AccessSessionSparkApplicationResponse": { +"description": "A summary of Spark Application", +"id": "AccessSessionSparkApplicationResponse", +"properties": { +"application": { +"$ref": "ApplicationInfo", +"description": "Output only. 
High level information corresponding to an application.", +"readOnly": true +} +}, +"type": "object" +}, +"AccessSessionSparkApplicationSqlQueryResponse": { +"description": "Details of a query for a Spark Application", +"id": "AccessSessionSparkApplicationSqlQueryResponse", +"properties": { +"executionData": { +"$ref": "SqlExecutionUiData", +"description": "SQL Execution Data" +} +}, +"type": "object" +}, +"AccessSessionSparkApplicationSqlSparkPlanGraphResponse": { +"description": "SparkPlanGraph for a Spark Application execution limited to maximum 10000 clusters.", +"id": "AccessSessionSparkApplicationSqlSparkPlanGraphResponse", +"properties": { +"sparkPlanGraph": { +"$ref": "SparkPlanGraph", +"description": "SparkPlanGraph for a Spark Application execution." +} +}, +"type": "object" +}, +"AccessSessionSparkApplicationStageAttemptResponse": { +"description": "Stage Attempt for a Stage of a Spark Application", +"id": "AccessSessionSparkApplicationStageAttemptResponse", +"properties": { +"stageData": { +"$ref": "StageData", +"description": "Output only. Data corresponding to a stage.", +"readOnly": true +} +}, +"type": "object" +}, +"AccessSessionSparkApplicationStageRddOperationGraphResponse": { +"description": "RDD operation graph for a Spark Application Stage limited to maximum 10000 clusters.", +"id": "AccessSessionSparkApplicationStageRddOperationGraphResponse", +"properties": { +"rddOperationGraph": { +"$ref": "RddOperationGraph", +"description": "RDD operation graph for a Spark Application Stage." +} +}, +"type": "object" +}, +"AccessSparkApplicationEnvironmentInfoResponse": { +"description": "Environment details of a Spark Application.", +"id": "AccessSparkApplicationEnvironmentInfoResponse", +"properties": { +"applicationEnvironmentInfo": { +"$ref": "ApplicationEnvironmentInfo", +"description": "Details about the Environment that the application is running in." 
+} +}, +"type": "object" +}, +"AccessSparkApplicationJobResponse": { +"description": "Details of a particular job associated with Spark Application", +"id": "AccessSparkApplicationJobResponse", +"properties": { +"jobData": { +"$ref": "JobData", +"description": "Output only. Data corresponding to a spark job.", +"readOnly": true +} +}, +"type": "object" +}, +"AccessSparkApplicationResponse": { +"description": "A summary of Spark Application", +"id": "AccessSparkApplicationResponse", +"properties": { +"application": { +"$ref": "ApplicationInfo", +"description": "Output only. High level information corresponding to an application.", +"readOnly": true +} +}, +"type": "object" +}, +"AccessSparkApplicationSqlQueryResponse": { +"description": "Details of a query for a Spark Application", +"id": "AccessSparkApplicationSqlQueryResponse", +"properties": { +"executionData": { +"$ref": "SqlExecutionUiData", +"description": "SQL Execution Data" +} +}, +"type": "object" +}, +"AccessSparkApplicationSqlSparkPlanGraphResponse": { +"description": "SparkPlanGraph for a Spark Application execution limited to maximum 10000 clusters.", +"id": "AccessSparkApplicationSqlSparkPlanGraphResponse", +"properties": { +"sparkPlanGraph": { +"$ref": "SparkPlanGraph", +"description": "SparkPlanGraph for a Spark Application execution." +} +}, +"type": "object" +}, +"AccessSparkApplicationStageAttemptResponse": { +"description": "Stage Attempt for a Stage of a Spark Application", +"id": "AccessSparkApplicationStageAttemptResponse", +"properties": { +"stageData": { +"$ref": "StageData", +"description": "Output only. 
Data corresponding to a stage.", +"readOnly": true +} +}, +"type": "object" +}, +"AccessSparkApplicationStageRddOperationGraphResponse": { +"description": "RDD operation graph for a Spark Application Stage limited to maximum 10000 clusters.", +"id": "AccessSparkApplicationStageRddOperationGraphResponse", +"properties": { +"rddOperationGraph": { +"$ref": "RddOperationGraph", +"description": "RDD operation graph for a Spark Application Stage." +} +}, +"type": "object" +}, +"AccumulableInfo": { +"id": "AccumulableInfo", +"properties": { +"accumullableInfoId": { +"format": "int64", +"type": "string" +}, +"name": { +"type": "string" +}, +"update": { +"type": "string" +}, +"value": { +"type": "string" +} +}, +"type": "object" +}, +"AnalyzeBatchRequest": { +"description": "A request to analyze a batch workload.", +"id": "AnalyzeBatchRequest", +"properties": { +"requestId": { +"description": "Optional. A unique ID used to identify the request. If the service receives two AnalyzeBatchRequest (http://cloud/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.AnalyzeBatchRequest)s with the same request_id, the second request is ignored and the Operation that corresponds to the first request created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The value must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"type": "string" +} +}, +"type": "object" +}, +"AnalyzeOperationMetadata": { +"description": "Metadata describing the Analyze operation.", +"id": "AnalyzeOperationMetadata", +"properties": { +"analyzedWorkloadName": { +"description": "Output only. name of the workload being analyzed.", +"readOnly": true, +"type": "string" +}, +"analyzedWorkloadType": { +"description": "Output only. 
Type of the workload being analyzed.", +"enum": [ +"WORKLOAD_TYPE_UNSPECIFIED", +"BATCH" +], +"enumDescriptions": [ +"Undefined option", +"Serverless batch job" +], +"readOnly": true, +"type": "string" +}, +"analyzedWorkloadUuid": { +"description": "Output only. unique identifier of the workload typically generated by control plane. E.g. batch uuid.", +"readOnly": true, +"type": "string" +}, +"createTime": { +"description": "Output only. The time when the operation was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"description": { +"description": "Output only. Short description of the operation.", +"readOnly": true, +"type": "string" +}, +"doneTime": { +"description": "Output only. The time when the operation finished.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"labels": { +"additionalProperties": { +"type": "string" +}, +"description": "Output only. Labels associated with the operation.", +"readOnly": true, +"type": "object" +}, +"warnings": { +"description": "Output only. 
Warnings encountered during operation execution.", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"AppSummary": { +"id": "AppSummary", +"properties": { +"numCompletedJobs": { +"format": "int32", +"type": "integer" +}, +"numCompletedStages": { +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"ApplicationAttemptInfo": { +"description": "Specific attempt of an application.", +"id": "ApplicationAttemptInfo", +"properties": { +"appSparkVersion": { +"type": "string" +}, +"attemptId": { +"type": "string" +}, +"completed": { +"type": "boolean" +}, +"durationMillis": { +"format": "int64", +"type": "string" +}, +"endTime": { +"format": "google-datetime", +"type": "string" +}, +"lastUpdated": { +"format": "google-datetime", +"type": "string" +}, +"sparkUser": { +"type": "string" +}, +"startTime": { +"format": "google-datetime", +"type": "string" +} +}, +"type": "object" +}, +"ApplicationEnvironmentInfo": { +"description": "Details about the Environment that the application is running in.", +"id": "ApplicationEnvironmentInfo", +"properties": { +"classpathEntries": { +"additionalProperties": { +"type": "string" +}, +"type": "object" +}, +"hadoopProperties": { +"additionalProperties": { +"type": "string" +}, +"type": "object" +}, +"metricsProperties": { +"additionalProperties": { +"type": "string" +}, +"type": "object" +}, +"resourceProfiles": { +"items": { +"$ref": "ResourceProfileInfo" +}, +"type": "array" +}, +"runtime": { +"$ref": "SparkRuntimeInfo" +}, +"sparkProperties": { +"additionalProperties": { +"type": "string" +}, +"type": "object" +}, +"systemProperties": { +"additionalProperties": { +"type": "string" +}, +"type": "object" +} +}, +"type": "object" +}, +"ApplicationInfo": { +"description": "High level information corresponding to an application.", +"id": "ApplicationInfo", +"properties": { +"applicationContextIngestionStatus": { +"enum": [ +"APPLICATION_CONTEXT_INGESTION_STATUS_UNSPECIFIED", 
+"APPLICATION_CONTEXT_INGESTION_STATUS_COMPLETED" +], +"enumDescriptions": [ +"", +"" +], +"type": "string" +}, +"applicationId": { +"type": "string" +}, +"attempts": { +"items": { +"$ref": "ApplicationAttemptInfo" +}, +"type": "array" +}, +"coresGranted": { +"format": "int32", +"type": "integer" +}, +"coresPerExecutor": { +"format": "int32", +"type": "integer" +}, +"maxCores": { +"format": "int32", +"type": "integer" +}, +"memoryPerExecutorMb": { +"format": "int32", +"type": "integer" +}, +"name": { +"type": "string" +}, +"quantileDataStatus": { +"enum": [ +"QUANTILE_DATA_STATUS_UNSPECIFIED", +"QUANTILE_DATA_STATUS_COMPLETED", +"QUANTILE_DATA_STATUS_FAILED" +], +"enumDescriptions": [ +"", +"", +"" +], +"type": "string" +} +}, +"type": "object" +}, +"AutoscalingConfig": { +"description": "Autoscaling Policy config associated with the cluster.", +"id": "AutoscalingConfig", +"properties": { +"policyUri": { +"description": "Optional. The autoscaling policy used by the cluster.Only resource names including projectid and location (region) are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Dataproc region.", +"type": "string" +} +}, +"type": "object" +}, +"AutoscalingPolicy": { +"description": "Describes an autoscaling policy for Dataproc cluster autoscaler.", +"id": "AutoscalingPolicy", +"properties": { +"basicAlgorithm": { +"$ref": "BasicAutoscalingAlgorithm" +}, +"id": { +"description": "Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.", +"type": "string" +}, +"labels": { +"additionalProperties": { +"type": "string" +}, +"description": "Optional. 
The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy.", +"type": "object" +}, +"name": { +"description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", +"readOnly": true, +"type": "string" +}, +"secondaryWorkerConfig": { +"$ref": "InstanceGroupAutoscalingPolicyConfig", +"description": "Optional. Describes how the autoscaler will operate for secondary workers." +}, +"workerConfig": { +"$ref": "InstanceGroupAutoscalingPolicyConfig", +"description": "Required. Describes how the autoscaler will operate for primary workers." +} +}, +"type": "object" +}, +"AutotuningConfig": { +"description": "Autotuning configuration of the workload.", +"id": "AutotuningConfig", +"properties": { +"scenarios": { +"description": "Optional. Scenarios for which tunings are applied.", +"items": { +"enum": [ +"SCENARIO_UNSPECIFIED", +"SCALING", +"BROADCAST_HASH_JOIN", +"MEMORY" +], +"enumDescriptions": [ +"Default value.", +"Scaling recommendations such as initialExecutors.", +"Adding hints for potential relation broadcasts.", +"Memory management for workloads." 
+], +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, +"AuxiliaryNodeGroup": { +"description": "Node group identification and configuration information.", +"id": "AuxiliaryNodeGroup", +"properties": { +"nodeGroup": { +"$ref": "NodeGroup", +"description": "Required. Node group configuration." +}, +"nodeGroupId": { +"description": "Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.", +"type": "string" +} +}, +"type": "object" +}, +"AuxiliaryServicesConfig": { +"description": "Auxiliary services configuration for a Cluster.", +"id": "AuxiliaryServicesConfig", +"properties": { +"metastoreConfig": { +"$ref": "MetastoreConfig", +"description": "Optional. The Hive Metastore configuration for this workload." +}, +"sparkHistoryServerConfig": { +"$ref": "SparkHistoryServerConfig", +"description": "Optional. The Spark History Server configuration for the workload." +} +}, +"type": "object" +}, +"BasicAutoscalingAlgorithm": { +"description": "Basic algorithm for autoscaling.", +"id": "BasicAutoscalingAlgorithm", +"properties": { +"cooldownPeriod": { +"description": "Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed.Bounds: 2m, 1d. Default: 2m.", +"format": "google-duration", +"type": "string" +}, +"sparkStandaloneConfig": { +"$ref": "SparkStandaloneAutoscalingConfig", +"description": "Optional. Spark Standalone autoscaling configuration" +}, +"yarnConfig": { +"$ref": "BasicYarnAutoscalingConfig", +"description": "Optional. YARN autoscaling configuration." +} +}, +"type": "object" +}, +"BasicYarnAutoscalingConfig": { +"description": "Basic autoscaling configurations for YARN.", +"id": "BasicYarnAutoscalingConfig", +"properties": { +"gracefulDecommissionTimeout": { +"description": "Required. 
Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations.Bounds: 0s, 1d.", +"format": "google-duration", +"type": "string" +}, +"scaleDownFactor": { +"description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.", +"format": "double", +"type": "number" +}, +"scaleDownMinWorkerFraction": { +"description": "Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.", +"format": "double", +"type": "number" +}, +"scaleUpFactor": { +"description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). 
See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.", +"format": "double", +"type": "number" +}, +"scaleUpMinWorkerFraction": { +"description": "Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.", +"format": "double", +"type": "number" +} +}, +"type": "object" +}, +"Batch": { +"description": "A representation of a batch workload in the service.", +"id": "Batch", +"properties": { +"createTime": { +"description": "Output only. The time when the batch was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"creator": { +"description": "Output only. The email address of the user who created the batch.", +"readOnly": true, +"type": "string" +}, +"environmentConfig": { +"$ref": "EnvironmentConfig", +"description": "Optional. Environment configuration for the batch execution." +}, +"labels": { +"additionalProperties": { +"type": "string" +}, +"description": "Optional. The labels to associate with this batch. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a batch.", +"type": "object" +}, +"name": { +"description": "Output only. The resource name of the batch.", +"readOnly": true, +"type": "string" +}, +"operation": { +"description": "Output only. 
The resource name of the operation associated with this batch.", +"readOnly": true, +"type": "string" +}, +"pysparkBatch": { +"$ref": "PySparkBatch", +"description": "Optional. PySpark batch config." +}, +"runtimeConfig": { +"$ref": "RuntimeConfig", +"description": "Optional. Runtime configuration for the batch execution." +}, +"runtimeInfo": { +"$ref": "RuntimeInfo", +"description": "Output only. Runtime information about batch execution.", +"readOnly": true +}, +"sparkBatch": { +"$ref": "SparkBatch", +"description": "Optional. Spark batch config." +}, +"sparkRBatch": { +"$ref": "SparkRBatch", +"description": "Optional. SparkR batch config." +}, +"sparkSqlBatch": { +"$ref": "SparkSqlBatch", +"description": "Optional. SparkSql batch config." +}, +"state": { +"description": "Output only. The state of the batch.", +"enum": [ +"STATE_UNSPECIFIED", +"PENDING", +"RUNNING", +"CANCELLING", +"CANCELLED", +"SUCCEEDED", +"FAILED" +], +"enumDescriptions": [ +"The batch state is unknown.", +"The batch is created before running.", +"The batch is running.", +"The batch is cancelling.", +"The batch cancellation was successful.", +"The batch completed successfully.", +"The batch is no longer running due to an error." +], +"readOnly": true, +"type": "string" +}, +"stateHistory": { +"description": "Output only. Historical state information for the batch.", +"items": { +"$ref": "StateHistory" +}, +"readOnly": true, +"type": "array" +}, +"stateMessage": { +"description": "Output only. Batch state details, such as a failure description if the state is FAILED.", +"readOnly": true, +"type": "string" +}, +"stateTime": { +"description": "Output only. The time when the batch entered a current state.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"uuid": { +"description": "Output only. A batch UUID (Unique Universal Identifier). 
The service generates this value when it creates the batch.", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"BatchOperationMetadata": { +"description": "Metadata describing the Batch operation.", +"id": "BatchOperationMetadata", +"properties": { +"batch": { +"description": "Name of the batch for the operation.", +"type": "string" +}, +"batchUuid": { +"description": "Batch UUID for the operation.", +"type": "string" +}, +"createTime": { +"description": "The time when the operation was created.", +"format": "google-datetime", +"type": "string" +}, +"description": { +"description": "Short description of the operation.", +"type": "string" +}, +"doneTime": { +"description": "The time when the operation finished.", +"format": "google-datetime", +"type": "string" +}, +"labels": { +"additionalProperties": { +"type": "string" +}, +"description": "Labels associated with the operation.", +"type": "object" +}, +"operationType": { +"description": "The operation type.", +"enum": [ +"BATCH_OPERATION_TYPE_UNSPECIFIED", +"BATCH" +], +"enumDescriptions": [ +"Batch operation type is unknown.", +"Batch operation type." +], +"type": "string" +}, +"warnings": { +"description": "Warnings encountered during operation execution.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, +"Binding": { +"description": "Associates members, or principals, with a role.", +"id": "Binding", +"properties": { +"condition": { +"$ref": "Expr", +"description": "The condition that is associated with this binding.If the condition evaluates to true, then this binding applies to the current request.If the condition evaluates to false, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies)." 
+}, +"members": { +"description": "Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}: A single identity in a workforce identity pool. principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}: All workforce identities in a group. principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}: All workforce identities with a specific attribute value. principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*: All identities in a workforce identity pool. 
principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}: A single identity in a workload identity pool. principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}: A workload identity pool group. principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}: All identities in a workload identity pool with a certain attribute. principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*: All identities in a workload identity pool. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding. deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}: Deleted single identity in a workforce identity pool. 
For example, deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value.", +"items": { +"type": "string" +}, +"type": "array" +}, +"role": { +"description": "Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.For an overview of the IAM roles and permissions, see the IAM documentation (https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see here (https://cloud.google.com/iam/docs/understanding-roles).", +"type": "string" +} +}, +"type": "object" +}, +"CancelJobRequest": { +"description": "A request to cancel a job.", +"id": "CancelJobRequest", +"properties": {}, +"type": "object" +}, +"Cluster": { +"description": "Describes the identifying information, config, and status of a Dataproc cluster", +"id": "Cluster", +"properties": { +"clusterName": { +"description": "Required. The cluster name, which must be unique within a project. The name must start with a lowercase letter, and can contain up to 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. The name of a deleted cluster can be reused.", +"type": "string" +}, +"clusterUuid": { +"description": "Output only. A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster.", +"readOnly": true, +"type": "string" +}, +"config": { +"$ref": "ClusterConfig", +"description": "Optional. The cluster config for a cluster of Compute Engine Instances. Note that Dataproc may set default values, and values may change when clusters are updated.Exactly one of ClusterConfig or VirtualClusterConfig must be specified." +}, +"labels": { +"additionalProperties": { +"type": "string" +}, +"description": "Optional. The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). 
Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.", +"type": "object" +}, +"metrics": { +"$ref": "ClusterMetrics", +"description": "Output only. Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release.", +"readOnly": true +}, +"projectId": { +"description": "Required. The Google Cloud Platform project ID that the cluster belongs to.", +"type": "string" +}, +"status": { +"$ref": "ClusterStatus", +"description": "Output only. Cluster status.", +"readOnly": true +}, +"statusHistory": { +"description": "Output only. The previous cluster status.", +"items": { +"$ref": "ClusterStatus" +}, +"readOnly": true, +"type": "array" +}, +"virtualClusterConfig": { +"$ref": "VirtualClusterConfig", +"description": "Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified." +} +}, +"type": "object" +}, +"ClusterConfig": { +"description": "The cluster config.", +"id": "ClusterConfig", +"properties": { +"autoscalingConfig": { +"$ref": "AutoscalingConfig", +"description": "Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset." +}, +"auxiliaryNodeGroups": { +"description": "Optional. The node group settings.", +"items": { +"$ref": "AuxiliaryNodeGroup" +}, +"type": "array" +}, +"configBucket": { +"description": "Optional. 
A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", +"type": "string" +}, +"dataprocMetricConfig": { +"$ref": "DataprocMetricConfig", +"description": "Optional. The config for Dataproc metrics." +}, +"encryptionConfig": { +"$ref": "EncryptionConfig", +"description": "Optional. Encryption settings for the cluster." +}, +"endpointConfig": { +"$ref": "EndpointConfig", +"description": "Optional. Port/endpoint configuration for this cluster" +}, +"gceClusterConfig": { +"$ref": "GceClusterConfig", +"description": "Optional. The shared Compute Engine config settings for all instances in a cluster." +}, +"gkeClusterConfig": { +"$ref": "GkeClusterConfig", +"deprecated": true, +"description": "Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. These config settings are mutually exclusive with Compute Engine-based options, such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config." +}, +"initializationActions": { +"description": "Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. 
You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi ", +"items": { +"$ref": "NodeInitializationAction" +}, +"type": "array" +}, +"lifecycleConfig": { +"$ref": "LifecycleConfig", +"description": "Optional. Lifecycle setting for the cluster." +}, +"masterConfig": { +"$ref": "InstanceGroupConfig", +"description": "Optional. The Compute Engine config settings for the cluster's master instance." +}, +"metastoreConfig": { +"$ref": "MetastoreConfig", +"description": "Optional. Metastore configuration." +}, +"secondaryWorkerConfig": { +"$ref": "InstanceGroupConfig", +"description": "Optional. The Compute Engine config settings for a cluster's secondary worker instances" +}, +"securityConfig": { +"$ref": "SecurityConfig", +"description": "Optional. Security settings for the cluster." +}, +"softwareConfig": { +"$ref": "SoftwareConfig", +"description": "Optional. The config settings for cluster software." +}, +"tempBucket": { +"description": "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... 
URI to a Cloud Storage bucket.", +"type": "string" +}, +"workerConfig": { +"$ref": "InstanceGroupConfig", +"description": "Optional. The Compute Engine config settings for the cluster's worker instances." +} +}, +"type": "object" +}, +"ClusterMetrics": { +"description": "Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release.", +"id": "ClusterMetrics", +"properties": { +"hdfsMetrics": { +"additionalProperties": { +"format": "int64", +"type": "string" +}, +"description": "The HDFS metrics.", +"type": "object" +}, +"yarnMetrics": { +"additionalProperties": { +"format": "int64", +"type": "string" +}, +"description": "YARN metrics.", +"type": "object" +} +}, +"type": "object" +}, +"ClusterOperation": { +"description": "The cluster operation triggered by a workflow.", +"id": "ClusterOperation", +"properties": { +"done": { +"description": "Output only. Indicates the operation is done.", +"readOnly": true, +"type": "boolean" +}, +"error": { +"description": "Output only. Error, if operation failed.", +"readOnly": true, +"type": "string" +}, +"operationId": { +"description": "Output only. The id of the cluster operation.", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"ClusterOperationMetadata": { +"description": "Metadata describing the operation.", +"id": "ClusterOperationMetadata", +"properties": { +"childOperationIds": { +"description": "Output only. Child operation ids", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +}, +"clusterName": { +"description": "Output only. Name of the cluster for the operation.", +"readOnly": true, +"type": "string" +}, +"clusterUuid": { +"description": "Output only. Cluster UUID for the operation.", +"readOnly": true, +"type": "string" +}, +"description": { +"description": "Output only. 
Short description of operation.", +"readOnly": true, +"type": "string" +}, +"labels": { +"additionalProperties": { +"type": "string" +}, +"description": "Output only. Labels associated with the operation", +"readOnly": true, +"type": "object" +}, +"operationType": { +"description": "Output only. The operation type.", +"readOnly": true, +"type": "string" +}, +"status": { +"$ref": "ClusterOperationStatus", +"description": "Output only. Current operation status.", +"readOnly": true +}, +"statusHistory": { +"description": "Output only. The previous operation status.", +"items": { +"$ref": "ClusterOperationStatus" +}, +"readOnly": true, +"type": "array" +}, +"warnings": { +"description": "Output only. Errors encountered during operation execution.", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"ClusterOperationStatus": { +"description": "The status of the operation.", +"id": "ClusterOperationStatus", +"properties": { +"details": { +"description": "Output only. A message containing any operation metadata details.", +"readOnly": true, +"type": "string" +}, +"innerState": { +"description": "Output only. A message containing the detailed operation state.", +"readOnly": true, +"type": "string" +}, +"state": { +"description": "Output only. A message containing the operation state.", +"enum": [ +"UNKNOWN", +"PENDING", +"RUNNING", +"DONE" +], +"enumDescriptions": [ +"Unused.", +"The operation has been created.", +"The operation is running.", +"The operation is done; either cancelled or completed." +], +"readOnly": true, +"type": "string" +}, +"stateStartTime": { +"description": "Output only. 
The time this state was entered.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"ClusterSelector": { +"description": "A selector that chooses target cluster for jobs based on metadata.", +"id": "ClusterSelector", +"properties": { +"clusterLabels": { +"additionalProperties": { +"type": "string" +}, +"description": "Required. The cluster labels. Cluster must have all labels to match.", +"type": "object" +}, +"zone": { +"description": "Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster.If unspecified, the zone of the first cluster matching the selector is used.", +"type": "string" +} +}, +"type": "object" +}, +"ClusterStatus": { +"description": "The status of a cluster and its instances.", +"id": "ClusterStatus", +"properties": { +"detail": { +"description": "Optional. Output only. Details of cluster's state.", +"readOnly": true, +"type": "string" +}, +"state": { +"description": "Output only. The cluster's state.", +"enum": [ +"UNKNOWN", +"CREATING", +"RUNNING", +"ERROR", +"ERROR_DUE_TO_UPDATE", +"DELETING", +"UPDATING", +"STOPPING", +"STOPPED", +"STARTING", +"REPAIRING" +], +"enumDescriptions": [ +"The cluster state is unknown.", +"The cluster is being created and set up. It is not ready for use.", +"The cluster is currently running and healthy. It is ready for use.Note: The cluster state changes from \"creating\" to \"running\" status after the master node(s), first two primary worker nodes (and the last primary worker node if primary workers > 2) are running.", +"The cluster encountered an error. It is not ready for use.", +"The cluster has encountered an error while being updated. Jobs can be submitted to the cluster, but the cluster cannot be updated.", +"The cluster is being deleted. It cannot be used.", +"The cluster is being updated. It continues to accept and process jobs.", +"The cluster is being stopped. 
It cannot be used.", +"The cluster is currently stopped. It is not ready for use.", +"The cluster is being started. It is not ready for use.", +"The cluster is being repaired. It is not ready for use." +], +"readOnly": true, +"type": "string" +}, +"stateStartTime": { +"description": "Output only. Time when this state was entered (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"substate": { +"description": "Output only. Additional state information that includes status reported by the agent.", +"enum": [ +"UNSPECIFIED", +"UNHEALTHY", +"STALE_STATUS" +], +"enumDescriptions": [ +"The cluster substate is unknown.", +"The cluster is known to be in an unhealthy state (for example, critical daemons are not running or HDFS capacity is exhausted).Applies to RUNNING state.", +"The agent-reported status is out of date (may occur if Dataproc loses communication with Agent).Applies to RUNNING state." +], +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"ClusterToRepair": { +"description": "Cluster to be repaired", +"id": "ClusterToRepair", +"properties": { +"clusterRepairAction": { +"description": "Required. Repair action to take on the cluster resource.", +"enum": [ +"CLUSTER_REPAIR_ACTION_UNSPECIFIED", +"REPAIR_ERROR_DUE_TO_UPDATE_CLUSTER" +], +"enumDescriptions": [ +"No action will be taken by default.", +"Repair cluster in ERROR_DUE_TO_UPDATE states." +], +"type": "string" +} +}, +"type": "object" +}, +"ConfidentialInstanceConfig": { +"description": "Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)", +"id": "ConfidentialInstanceConfig", +"properties": { +"enableConfidentialCompute": { +"description": "Optional. 
Defines whether the instance should have confidential compute enabled.", +"type": "boolean" +} +}, +"type": "object" +}, +"ConsolidatedExecutorSummary": { +"description": "Consolidated summary about executors used by the application.", +"id": "ConsolidatedExecutorSummary", +"properties": { +"activeTasks": { +"format": "int32", +"type": "integer" +}, +"completedTasks": { +"format": "int32", +"type": "integer" +}, +"count": { +"format": "int32", +"type": "integer" +}, +"diskUsed": { +"format": "int64", +"type": "string" +}, +"failedTasks": { +"format": "int32", +"type": "integer" +}, +"isExcluded": { +"format": "int32", +"type": "integer" +}, +"maxMemory": { +"format": "int64", +"type": "string" +}, +"memoryMetrics": { +"$ref": "MemoryMetrics" +}, +"memoryUsed": { +"format": "int64", +"type": "string" +}, +"rddBlocks": { +"format": "int32", +"type": "integer" +}, +"totalCores": { +"format": "int32", +"type": "integer" +}, +"totalDurationMillis": { +"format": "int64", +"type": "string" +}, +"totalGcTimeMillis": { +"format": "int64", +"type": "string" +}, +"totalInputBytes": { +"format": "int64", +"type": "string" +}, +"totalShuffleRead": { +"format": "int64", +"type": "string" +}, +"totalShuffleWrite": { +"format": "int64", +"type": "string" +}, +"totalTasks": { +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"DataprocMetricConfig": { +"description": "Dataproc metric config.", +"id": "DataprocMetricConfig", +"properties": { +"metrics": { +"description": "Required. Metrics sources to enable.", +"items": { +"$ref": "Metric" +}, +"type": "array" +} +}, +"type": "object" +}, +"DiagnoseClusterRequest": { +"description": "A request to collect cluster diagnostic information.", +"id": "DiagnoseClusterRequest", +"properties": { +"diagnosisInterval": { +"$ref": "Interval", +"description": "Optional. Time interval in which diagnosis should be carried out on the cluster." +}, +"job": { +"deprecated": true, +"description": "Optional. 
DEPRECATED Specifies the job on which diagnosis is to be performed. Format: projects/{project}/regions/{region}/jobs/{job}", +"type": "string" +}, +"jobs": { +"description": "Optional. Specifies a list of jobs on which diagnosis is to be performed. Format: projects/{project}/regions/{region}/jobs/{job}", +"items": { +"type": "string" +}, +"type": "array" +}, +"tarballAccess": { +"description": "Optional. (Optional) The access type to the diagnostic tarball. If not specified, falls back to default access of the bucket", +"enum": [ +"TARBALL_ACCESS_UNSPECIFIED", +"GOOGLE_CLOUD_SUPPORT", +"GOOGLE_DATAPROC_DIAGNOSE" +], +"enumDescriptions": [ +"Tarball Access unspecified. Falls back to default access of the bucket", +"Google Cloud Support group has read access to the diagnostic tarball", +"Google Cloud Dataproc Diagnose service account has read access to the diagnostic tarball" +], +"type": "string" +}, +"tarballGcsDir": { +"description": "Optional. (Optional) The output Cloud Storage directory for the diagnostic tarball. If not specified, a task-specific directory in the cluster's staging bucket will be used.", +"type": "string" +}, +"yarnApplicationId": { +"deprecated": true, +"description": "Optional. DEPRECATED Specifies the yarn application on which diagnosis is to be performed.", +"type": "string" +}, +"yarnApplicationIds": { +"description": "Optional. Specifies a list of yarn applications on which diagnosis is to be performed.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, +"DiagnoseClusterResults": { +"description": "The location of diagnostic output.", +"id": "DiagnoseClusterResults", +"properties": { +"outputUri": { +"description": "Output only. The Cloud Storage URI of the diagnostic output. 
The output report is a plain text file with a summary of collected diagnostics.", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"DiskConfig": { +"description": "Specifies the config of disk options for a group of VM instances.", +"id": "DiskConfig", +"properties": { +"bootDiskProvisionedIops": { +"description": "Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. Note: This field is only supported if boot_disk_type is hyperdisk-balanced.", +"format": "int64", +"type": "string" +}, +"bootDiskProvisionedThroughput": { +"description": "Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. Note: This field is only supported if boot_disk_type is hyperdisk-balanced.", +"format": "int64", +"type": "string" +}, +"bootDiskSizeGb": { +"description": "Optional. Size in GB of the boot disk (default is 500GB).", +"format": "int32", +"type": "integer" +}, +"bootDiskType": { +"description": "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", +"type": "string" +}, +"localSsdInterface": { +"description": "Optional. Interface type of local SSDs (default is \"scsi\"). Valid values: \"scsi\" (Small Computer System Interface), \"nvme\" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", +"type": "string" +}, +"numLocalSsds": { +"description": "Optional. Number of attached SSDs, from 0 to 8 (default is 0). 
If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"DriverSchedulingConfig": { +"description": "Driver scheduling configuration.", +"id": "DriverSchedulingConfig", +"properties": { +"memoryMb": { +"description": "Required. The amount of memory in MB the driver is requesting.", +"format": "int32", +"type": "integer" +}, +"vcores": { +"description": "Required. The number of vCPUs the driver is requesting.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"Empty": { +"description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } ", +"id": "Empty", +"properties": {}, +"type": "object" +}, +"EncryptionConfig": { +"description": "Encryption settings for the cluster.", +"id": "EncryptionConfig", +"properties": { +"gcePdKmsKeyName": { +"description": "Optional. The Cloud KMS key resource name to use for persistent disk encryption for all instances in the cluster. See Use CMEK with cluster data (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) for more information.", +"type": "string" +}, +"kmsKey": { +"description": "Optional. The Cloud KMS key resource name to use for cluster persistent disk and job argument encryption. 
See Use CMEK with cluster data (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) for more information.When this key resource name is provided, the following job arguments of the following job types submitted to the cluster are encrypted using CMEK: FlinkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) HadoopJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) SparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) SparkRJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) PySparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) SparkSqlJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) scriptVariables and queryList.queries HiveJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) scriptVariables and queryList.queries PigJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) scriptVariables and queryList.queries PrestoJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) scriptVariables and queryList.queries", +"type": "string" +} +}, +"type": "object" +}, +"EndpointConfig": { +"description": "Endpoint config for this cluster", +"id": "EndpointConfig", +"properties": { +"enableHttpPortAccess": { +"description": "Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.", +"type": "boolean" +}, +"httpPorts": { +"additionalProperties": { +"type": "string" +}, +"description": "Output only. The map of port descriptions to URLs. 
Will only be populated if enable_http_port_access is true.", +"readOnly": true, +"type": "object" +} +}, +"type": "object" +}, +"EnvironmentConfig": { +"description": "Environment configuration for a workload.", +"id": "EnvironmentConfig", +"properties": { +"executionConfig": { +"$ref": "ExecutionConfig", +"description": "Optional. Execution configuration for a workload." +}, +"peripheralsConfig": { +"$ref": "PeripheralsConfig", +"description": "Optional. Peripherals configuration that workload has access to." +} +}, +"type": "object" +}, +"ExecutionConfig": { +"description": "Execution configuration for a workload.", +"id": "ExecutionConfig", +"properties": { +"idleTtl": { +"description": "Optional. Applies to sessions only. The duration to keep the session alive while it's idling. Exceeding this threshold causes the session to terminate. This field cannot be set on a batch workload. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Defaults to 1 hour if not set. If both ttl and idle_ttl are specified for an interactive session, the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.", +"format": "google-duration", +"type": "string" +}, +"kmsKey": { +"description": "Optional. The Cloud KMS key to use for encryption.", +"type": "string" +}, +"networkTags": { +"description": "Optional. Tags used for network traffic control.", +"items": { +"type": "string" +}, +"type": "array" +}, +"networkUri": { +"description": "Optional. Network URI to connect workload to.", +"type": "string" +}, +"serviceAccount": { +"description": "Optional. Service account that used to execute workload.", +"type": "string" +}, +"stagingBucket": { +"description": "Optional. 
A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", +"type": "string" +}, +"subnetworkUri": { +"description": "Optional. Subnetwork URI to connect workload to.", +"type": "string" +}, +"ttl": { +"description": "Optional. The duration after which the workload will be terminated, specified as the JSON representation for Duration (https://protobuf.dev/programming-guides/proto3/#json). When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. 
If both ttl and idle_ttl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.", +"format": "google-duration", +"type": "string" +} +}, +"type": "object" +}, +"ExecutorMetrics": { +"id": "ExecutorMetrics", +"properties": { +"metrics": { +"additionalProperties": { +"format": "int64", +"type": "string" +}, +"type": "object" +} +}, +"type": "object" +}, +"ExecutorMetricsDistributions": { +"id": "ExecutorMetricsDistributions", +"properties": { +"diskBytesSpilled": { +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +}, +"failedTasks": { +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +}, +"inputBytes": { +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +}, +"inputRecords": { +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +}, +"killedTasks": { +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +}, +"memoryBytesSpilled": { +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +}, +"outputBytes": { +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +}, +"outputRecords": { +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +}, +"peakMemoryMetrics": { +"$ref": "ExecutorPeakMetricsDistributions" +}, +"quantiles": { +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +}, +"shuffleRead": { +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +}, +"shuffleReadRecords": { +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +}, +"shuffleWrite": { +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +}, +"shuffleWriteRecords": { +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +}, +"succeededTasks": { +"items": { +"format": "double", +"type": "number" +}, 
+"type": "array" +}, +"taskTimeMillis": { +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +} +}, +"type": "object" +}, +"ExecutorPeakMetricsDistributions": { +"id": "ExecutorPeakMetricsDistributions", +"properties": { +"executorMetrics": { +"items": { +"$ref": "ExecutorMetrics" +}, +"type": "array" +}, +"quantiles": { +"items": { +"format": "double", +"type": "number" +}, +"type": "array" +} +}, +"type": "object" +}, +"ExecutorResourceRequest": { +"description": "Resources used per executor used by the application.", +"id": "ExecutorResourceRequest", +"properties": { +"amount": { +"format": "int64", +"type": "string" +}, +"discoveryScript": { +"type": "string" +}, +"resourceName": { +"type": "string" +}, +"vendor": { +"type": "string" +} +}, +"type": "object" +}, +"ExecutorStageSummary": { +"description": "Executor resources consumed by a stage.", +"id": "ExecutorStageSummary", +"properties": { +"diskBytesSpilled": { +"format": "int64", +"type": "string" +}, +"executorId": { +"type": "string" +}, +"failedTasks": { +"format": "int32", +"type": "integer" +}, +"inputBytes": { +"format": "int64", +"type": "string" +}, +"inputRecords": { +"format": "int64", +"type": "string" +}, +"isExcludedForStage": { +"type": "boolean" +}, +"killedTasks": { +"format": "int32", +"type": "integer" +}, +"memoryBytesSpilled": { +"format": "int64", +"type": "string" +}, +"outputBytes": { +"format": "int64", +"type": "string" +}, +"outputRecords": { +"format": "int64", +"type": "string" +}, +"peakMemoryMetrics": { +"$ref": "ExecutorMetrics" +}, +"shuffleRead": { +"format": "int64", +"type": "string" +}, +"shuffleReadRecords": { +"format": "int64", +"type": "string" +}, +"shuffleWrite": { +"format": "int64", +"type": "string" +}, +"shuffleWriteRecords": { +"format": "int64", +"type": "string" +}, +"stageAttemptId": { +"format": "int32", +"type": "integer" +}, +"stageId": { +"format": "int64", +"type": "string" +}, +"succeededTasks": { +"format": "int32", +"type": 
"integer" +}, +"taskTimeMillis": { +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, +"ExecutorSummary": { +"description": "Details about executors used by the application.", +"id": "ExecutorSummary", +"properties": { +"activeTasks": { +"format": "int32", +"type": "integer" +}, +"addTime": { +"format": "google-datetime", +"type": "string" +}, +"attributes": { +"additionalProperties": { +"type": "string" +}, +"type": "object" +}, +"completedTasks": { +"format": "int32", +"type": "integer" +}, +"diskUsed": { +"format": "int64", +"type": "string" +}, +"excludedInStages": { +"items": { +"format": "int64", +"type": "string" +}, +"type": "array" +}, +"executorId": { +"type": "string" +}, +"executorLogs": { +"additionalProperties": { +"type": "string" +}, +"type": "object" +}, +"failedTasks": { +"format": "int32", +"type": "integer" +}, +"hostPort": { +"type": "string" +}, +"isActive": { +"type": "boolean" +}, +"isExcluded": { +"type": "boolean" +}, +"maxMemory": { +"format": "int64", +"type": "string" +}, +"maxTasks": { +"format": "int32", +"type": "integer" +}, +"memoryMetrics": { +"$ref": "MemoryMetrics" +}, +"memoryUsed": { +"format": "int64", +"type": "string" +}, +"peakMemoryMetrics": { +"$ref": "ExecutorMetrics" +}, +"rddBlocks": { +"format": "int32", +"type": "integer" +}, +"removeReason": { +"type": "string" +}, +"removeTime": { +"format": "google-datetime", +"type": "string" +}, +"resourceProfileId": { +"format": "int32", +"type": "integer" +}, +"resources": { +"additionalProperties": { +"$ref": "ResourceInformation" +}, +"type": "object" +}, +"totalCores": { +"format": "int32", +"type": "integer" +}, +"totalDurationMillis": { +"format": "int64", +"type": "string" +}, +"totalGcTimeMillis": { +"format": "int64", +"type": "string" +}, +"totalInputBytes": { +"format": "int64", +"type": "string" +}, +"totalShuffleRead": { +"format": "int64", +"type": "string" +}, +"totalShuffleWrite": { +"format": "int64", +"type": "string" +}, +"totalTasks": { 
+"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"Expr": { +"description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec.Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() < 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' && document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", +"id": "Expr", +"properties": { +"description": { +"description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", +"type": "string" +}, +"expression": { +"description": "Textual representation of an expression in Common Expression Language syntax.", +"type": "string" +}, +"location": { +"description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", +"type": "string" +}, +"title": { +"description": "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. 
in UIs which allow to enter the expression.", +"type": "string" +} +}, +"type": "object" +}, +"FlinkJob": { +"description": "A Dataproc job for running Apache Flink applications on YARN.", +"id": "FlinkJob", +"properties": { +"args": { +"description": "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision might occur that causes an incorrect job submission.", +"items": { +"type": "string" +}, +"type": "array" +}, +"jarFileUris": { +"description": "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Flink driver and tasks.", +"items": { +"type": "string" +}, +"type": "array" +}, +"loggingConfig": { +"$ref": "LoggingConfig", +"description": "Optional. The runtime log config for job execution." +}, +"mainClass": { +"description": "The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris.", +"type": "string" +}, +"mainJarFileUri": { +"description": "The HCFS URI of the jar file that contains the main class.", +"type": "string" +}, +"properties": { +"additionalProperties": { +"type": "string" +}, +"description": "Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code.", +"type": "object" +}, +"savepointUri": { +"description": "Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job.", +"type": "string" +} +}, +"type": "object" +}, +"GceClusterConfig": { +"description": "Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster.", +"id": "GceClusterConfig", +"properties": { +"confidentialInstanceConfig": { +"$ref": "ConfidentialInstanceConfig", +"description": "Optional. 
Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)." +}, +"internalIpOnly": { +"description": "Optional. This setting applies to subnetwork-enabled networks. It is set to true by default in clusters created with image versions 2.2.x.When set to true: All cluster VMs have internal IP addresses. Google Private Access (https://cloud.google.com/vpc/docs/private-google-access) must be enabled to access Dataproc and other Google Cloud APIs. Off-cluster dependencies must be configured to be accessible without external IP addresses.When set to false: Cluster VMs are not restricted to internal IP addresses. Ephemeral external IP addresses are assigned to each cluster VM.", +"type": "boolean" +}, +"metadata": { +"additionalProperties": { +"type": "string" +}, +"description": "Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", +"type": "object" +}, +"networkUri": { +"description": "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default", +"type": "string" +}, +"nodeGroupAffinity": { +"$ref": "NodeGroupAffinity", +"description": "Optional. Node Group Affinity for sole-tenant clusters." +}, +"privateIpv6GoogleAccess": { +"description": "Optional. 
The type of IPv6 access for a cluster.", +"enum": [ +"PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED", +"INHERIT_FROM_SUBNETWORK", +"OUTBOUND", +"BIDIRECTIONAL" +], +"enumDescriptions": [ +"If unspecified, Compute Engine default behavior will apply, which is the same as INHERIT_FROM_SUBNETWORK.", +"Private access to and from Google Services configuration inherited from the subnetwork configuration. This is the default Compute Engine behavior.", +"Enables outbound private IPv6 access to Google Services from the Dataproc cluster.", +"Enables bidirectional private IPv6 access between Google Services and the Dataproc cluster." +], +"type": "string" +}, +"reservationAffinity": { +"$ref": "ReservationAffinity", +"description": "Optional. Reservation Affinity for consuming Zonal reservation." +}, +"serviceAccount": { +"description": "Optional. The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services.If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", +"type": "string" +}, +"serviceAccountScopes": { +"description": "Optional. The URIs of service account scopes to be included in Compute Engine instances. 
The following base set of scopes is always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly https://www.googleapis.com/auth/devstorage.read_write https://www.googleapis.com/auth/logging.writeIf no scopes are specified, the following defaults are also provided: https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/bigtable.admin.table https://www.googleapis.com/auth/bigtable.data https://www.googleapis.com/auth/devstorage.full_control", +"items": { +"type": "string" +}, +"type": "array" +}, +"shieldedInstanceConfig": { +"$ref": "ShieldedInstanceConfig", +"description": "Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm)." +}, +"subnetworkUri": { +"description": "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0 projects/[project_id]/regions/[region]/subnetworks/sub0 sub0", +"type": "string" +}, +"tags": { +"description": "The Compute Engine network tags to add to all instances (see Tagging instances (https://cloud.google.com/vpc/docs/add-remove-network-tags)).", +"items": { +"type": "string" +}, +"type": "array" +}, +"zoneUri": { +"description": "Optional. The Compute Engine zone where the Dataproc cluster will be located. If omitted, the service will pick a zone in the cluster's Compute Engine region. On a get request, zone will always be present.A full URL, partial URI, or short name are valid. 
Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] [zone]", +"type": "string" +} +}, +"type": "object" +}, +"GetIamPolicyRequest": { +"description": "Request message for GetIamPolicy method.", +"id": "GetIamPolicyRequest", +"properties": { +"options": { +"$ref": "GetPolicyOptions", +"description": "OPTIONAL: A GetPolicyOptions object for specifying options to GetIamPolicy." +} +}, +"type": "object" +}, +"GetPolicyOptions": { +"description": "Encapsulates settings provided to GetIamPolicy.", +"id": "GetPolicyOptions", +"properties": { +"requestedPolicyVersion": { +"description": "Optional. The maximum policy version that will be used to format the policy.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset.The policy in the response might use the policy version that you specified, or it might use a lower policy version. For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GkeClusterConfig": { +"description": "The cluster's GKE config.", +"id": "GkeClusterConfig", +"properties": { +"gkeClusterTarget": { +"description": "Optional. A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'", +"type": "string" +}, +"namespacedGkeDeploymentTarget": { +"$ref": "NamespacedGkeDeploymentTarget", +"deprecated": true, +"description": "Optional. Deprecated. 
Use gkeClusterTarget. Used only for the deprecated beta. A target for the deployment." +}, +"nodePoolTarget": { +"description": "Optional. GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.", +"items": { +"$ref": "GkeNodePoolTarget" +}, +"type": "array" +} +}, +"type": "object" +}, +"GkeNodeConfig": { +"description": "Parameters that describe cluster nodes.", +"id": "GkeNodeConfig", +"properties": { +"accelerators": { +"description": "Optional. A list of hardware accelerators (https://cloud.google.com/compute/docs/gpus) to attach to each node.", +"items": { +"$ref": "GkeNodePoolAcceleratorConfig" +}, +"type": "array" +}, +"bootDiskKmsKey": { +"description": "Optional. The Customer Managed Encryption Key (CMEK) (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. Specify the key using the following format: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}", +"type": "string" +}, +"localSsdCount": { +"description": "Optional. The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone (see Adding Local SSDs (https://cloud.google.com/compute/docs/disks/local-ssd)).", +"format": "int32", +"type": "integer" +}, +"machineType": { +"description": "Optional. The name of a Compute Engine machine type (https://cloud.google.com/compute/docs/machine-types).", +"type": "string" +}, +"minCpuPlatform": { +"description": "Optional. Minimum CPU platform (https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. 
Specify the friendly names of CPU platforms, such as \"Intel Haswell\"` or Intel Sandy Bridge\".", +"type": "string" +}, +"preemptible": { +"description": "Optional. Whether the nodes are created as legacy preemptible VM instances (https://cloud.google.com/compute/docs/instances/preemptible). Also see Spot VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).", +"type": "boolean" +}, +"spot": { +"description": "Optional. Whether the nodes are created as Spot VM instances (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the latest update to legacy preemptible VMs. Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).", +"type": "boolean" +} +}, +"type": "object" +}, +"GkeNodePoolAcceleratorConfig": { +"description": "A GkeNodeConfigAcceleratorConfig represents a Hardware Accelerator request for a node pool.", +"id": "GkeNodePoolAcceleratorConfig", +"properties": { +"acceleratorCount": { +"description": "The number of accelerator cards exposed to an instance.", +"format": "int64", +"type": "string" +}, +"acceleratorType": { +"description": "The accelerator type resource namename (see GPUs on Compute Engine).", +"type": "string" +}, +"gpuPartitionSize": { +"description": "Size of partitions to create on the GPU. 
Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).", +"type": "string" +} +}, +"type": "object" +}, +"GkeNodePoolAutoscalingConfig": { +"description": "GkeNodePoolAutoscaling contains information the cluster autoscaler needs to adjust the size of the node pool to the current cluster usage.", +"id": "GkeNodePoolAutoscalingConfig", +"properties": { +"maxNodeCount": { +"description": "The maximum number of nodes in the node pool. Must be >= min_node_count, and must be > 0. Note: Quota must be sufficient to scale up the cluster.", +"format": "int32", +"type": "integer" +}, +"minNodeCount": { +"description": "The minimum number of nodes in the node pool. Must be >= 0 and <= max_node_count.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GkeNodePoolConfig": { +"description": "The configuration of a GKE node pool used by a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).", +"id": "GkeNodePoolConfig", +"properties": { +"autoscaling": { +"$ref": "GkeNodePoolAutoscalingConfig", +"description": "Optional. The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present." +}, +"config": { +"$ref": "GkeNodeConfig", +"description": "Optional. The node pool configuration." +}, +"locations": { +"description": "Optional. 
The list of Compute Engine zones (https://cloud.google.com/compute/docs/zones#available) where node pool nodes associated with a Dataproc on GKE virtual cluster will be located.Note: All node pools associated with a virtual cluster must be located in the same region as the virtual cluster, and they must be located in the same zone within that region.If a location is not specified during node pool creation, Dataproc on GKE will choose the zone.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, +"GkeNodePoolTarget": { +"description": "GKE node pools that Dataproc workloads run on.", +"id": "GkeNodePoolTarget", +"properties": { +"nodePool": { +"description": "Required. The target GKE node pool. Format: 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'", +"type": "string" +}, +"nodePoolConfig": { +"$ref": "GkeNodePoolConfig", +"description": "Input only. The configuration for the GKE node pool.If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail.If omitted, any node pool with the specified name is used. If a node pool with the specified name does not exist, Dataproc create a node pool with default values.This is an input only field. It will not be returned by the API." +}, +"roles": { +"description": "Required. The roles associated with the GKE node pool.", +"items": { +"enum": [ +"ROLE_UNSPECIFIED", +"DEFAULT", +"CONTROLLER", +"SPARK_DRIVER", +"SPARK_EXECUTOR" +], +"enumDescriptions": [ +"Role is unspecified.", +"At least one node pool must have the DEFAULT role. Work assigned to a role that is not associated with a node pool is assigned to the node pool with the DEFAULT role. 
For example, work assigned to the CONTROLLER role will be assigned to the node pool with the DEFAULT role if no node pool has the CONTROLLER role.", +"Run work associated with the Dataproc control plane (for example, controllers and webhooks). Very low resource requirements.", +"Run work associated with a Spark driver of a job.", +"Run work associated with a Spark executor of a job." +], +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig": { +"description": "Encryption settings for encrypting workflow template job arguments.", +"id": "GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig", +"properties": { +"kmsKey": { +"description": "Optional. The Cloud KMS key name to use for encrypting workflow template job arguments.When this this key is provided, the following workflow template job arguments (https://cloud.google.com/dataproc/docs/concepts/workflows/use-workflows#adding_jobs_to_a_template), if present, are CMEK encrypted (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_workflow_template_data): FlinkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) HadoopJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) SparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) SparkRJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) PySparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) SparkSqlJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) scriptVariables and queryList.queries HiveJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) scriptVariables and queryList.queries PigJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) scriptVariables and queryList.queries PrestoJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) 
scriptVariables and queryList.queries", +"type": "string" +} +}, +"type": "object" +}, +"HadoopJob": { +"description": "A Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).", +"id": "HadoopJob", +"properties": { +"archiveUris": { +"description": "Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.", +"items": { +"type": "string" +}, +"type": "array" +}, +"args": { +"description": "Optional. The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision might occur that causes an incorrect job submission.", +"items": { +"type": "string" +}, +"type": "array" +}, +"fileUris": { +"description": "Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.", +"items": { +"type": "string" +}, +"type": "array" +}, +"jarFileUris": { +"description": "Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.", +"items": { +"type": "string" +}, +"type": "array" +}, +"loggingConfig": { +"$ref": "LoggingConfig", +"description": "Optional. The runtime log config for job execution." +}, +"mainClass": { +"description": "The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.", +"type": "string" +}, +"mainJarFileUri": { +"description": "The HCFS URI of the jar file containing the main class. 
Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'", +"type": "string" +}, +"properties": { +"additionalProperties": { +"type": "string" +}, +"description": "Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.", +"type": "object" +} +}, +"type": "object" +}, +"HiveJob": { +"description": "A Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN.", +"id": "HiveJob", +"properties": { +"continueOnFailure": { +"description": "Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.", +"type": "boolean" +}, +"jarFileUris": { +"description": "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.", +"items": { +"type": "string" +}, +"type": "array" +}, +"properties": { +"additionalProperties": { +"type": "string" +}, +"description": "Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.", +"type": "object" +}, +"queryFileUri": { +"description": "The HCFS URI of the script that contains Hive queries.", +"type": "string" +}, +"queryList": { +"$ref": "QueryList", +"description": "A list of queries." +}, +"scriptVariables": { +"additionalProperties": { +"type": "string" +}, +"description": "Optional. 
Mapping of query variable names to values (equivalent to the Hive command: SET name=\"value\";).", +"type": "object" +} +}, +"type": "object" +}, +"IdentityConfig": { +"description": "Identity related configuration, including service account based secure multi-tenancy user mappings.", +"id": "IdentityConfig", +"properties": { +"userServiceAccountMapping": { +"additionalProperties": { +"type": "string" +}, +"description": "Required. Map of user to service account.", +"type": "object" +} +}, +"type": "object" +}, +"InjectCredentialsRequest": { +"description": "A request to inject credentials into a cluster.", +"id": "InjectCredentialsRequest", +"properties": { +"clusterUuid": { +"description": "Required. The cluster UUID.", +"type": "string" +}, +"credentialsCiphertext": { +"description": "Required. The encrypted credentials being injected in to the cluster.The client is responsible for encrypting the credentials in a way that is supported by the cluster.A wrapped value is used here so that the actual contents of the encrypted credentials are not written to audit logs.", +"type": "string" +} +}, +"type": "object" +}, +"InputMetrics": { +"description": "Metrics about the input data read by the task.", +"id": "InputMetrics", +"properties": { +"bytesRead": { +"format": "int64", +"type": "string" +}, +"recordsRead": { +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, +"InputQuantileMetrics": { +"id": "InputQuantileMetrics", +"properties": { +"bytesRead": { +"$ref": "Quantiles" +}, +"recordsRead": { +"$ref": "Quantiles" +} +}, +"type": "object" +}, +"InstanceFlexibilityPolicy": { +"description": "Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.", +"id": "InstanceFlexibilityPolicy", +"properties": { +"instanceSelectionList": { +"description": "Optional. 
List of instance selection options that the group will use when creating new VMs.", +"items": { +"$ref": "InstanceSelection" +}, +"type": "array" +}, +"instanceSelectionResults": { +"description": "Output only. A list of instance selection results in the group.", +"items": { +"$ref": "InstanceSelectionResult" +}, +"readOnly": true, +"type": "array" +}, +"provisioningModelMix": { +"$ref": "ProvisioningModelMix", +"description": "Optional. Defines how the Group selects the provisioning model to ensure required reliability." +} +}, +"type": "object" +}, +"InstanceGroupAutoscalingPolicyConfig": { +"description": "Configuration for the size bounds of an instance group, including its proportional size to other groups.", +"id": "InstanceGroupAutoscalingPolicyConfig", +"properties": { +"maxInstances": { +"description": "Required. Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set.Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). Default: 0.", +"format": "int32", +"type": "integer" +}, +"minInstances": { +"description": "Optional. Minimum number of instances for this group.Primary workers - Bounds: 2, max_instances. Default: 2. Secondary workers - Bounds: 0, max_instances. Default: 0.", +"format": "int32", +"type": "integer" +}, +"weight": { +"description": "Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker.The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if max_instances for secondary workers is 0, then only primary workers will be added. 
The cluster can also be out of balance when created.If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"InstanceGroupConfig": { +"description": "The config settings for Compute Engine resources in an instance group, such as a master or worker group.", +"id": "InstanceGroupConfig", +"properties": { +"accelerators": { +"description": "Optional. The Compute Engine accelerator configuration for these instances.", +"items": { +"$ref": "AcceleratorConfig" +}, +"type": "array" +}, +"diskConfig": { +"$ref": "DiskConfig", +"description": "Optional. Disk option config settings." +}, +"imageUri": { +"description": "Optional. The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.", +"type": "string" +}, +"instanceFlexibilityPolicy": { +"$ref": "InstanceFlexibilityPolicy", +"description": "Optional. Instance flexibility Policy allowing a mixture of VM shapes and provisioning models." +}, +"instanceNames": { +"description": "Output only. 
The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +}, +"instanceReferences": { +"description": "Output only. List of references to Compute Engine instances.", +"items": { +"$ref": "InstanceReference" +}, +"readOnly": true, +"type": "array" +}, +"isPreemptible": { +"description": "Output only. Specifies that this instance group contains preemptible instances.", +"readOnly": true, +"type": "boolean" +}, +"machineTypeUri": { +"description": "Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.", +"type": "string" +}, +"managedGroupConfig": { +"$ref": "ManagedGroupConfig", +"description": "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", +"readOnly": true +}, +"minCpuPlatform": { +"description": "Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", +"type": "string" +}, +"minNumInstances": { +"description": "Optional. The minimum number of primary worker instances to create. 
If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster in placed in an ERROR state. The failed VMs are not deleted.", +"format": "int32", +"type": "integer" +}, +"numInstances": { +"description": "Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.", +"format": "int32", +"type": "integer" +}, +"preemptibility": { +"description": "Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE.", +"enum": [ +"PREEMPTIBILITY_UNSPECIFIED", +"NON_PREEMPTIBLE", +"PREEMPTIBLE", +"SPOT" +], +"enumDescriptions": [ +"Preemptibility is unspecified, the system will choose the appropriate setting for each instance group.", +"Instances are non-preemptible.This option is allowed for all instance groups and is the only valid value for Master and Worker instance groups.", +"Instances are preemptible (https://cloud.google.com/compute/docs/instances/preemptible).This option is allowed only for secondary worker (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) groups.", +"Instances are Spot VMs (https://cloud.google.com/compute/docs/instances/spot).This option is allowed only for secondary worker (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) groups. Spot VMs are the latest version of preemptible VMs (https://cloud.google.com/compute/docs/instances/preemptible), and provide additional features." 
+], +"type": "string" +}, +"startupConfig": { +"$ref": "StartupConfig", +"description": "Optional. Configuration to handle the startup of instances during cluster create and update process." +} +}, +"type": "object" +}, +"InstanceReference": { +"description": "A reference to a Compute Engine instance.", +"id": "InstanceReference", +"properties": { +"instanceId": { +"description": "The unique identifier of the Compute Engine instance.", +"type": "string" +}, +"instanceName": { +"description": "The user-friendly name of the Compute Engine instance.", +"type": "string" +}, +"publicEciesKey": { +"description": "The public ECIES key used for sharing data with this instance.", +"type": "string" +}, +"publicKey": { +"description": "The public RSA key used for sharing data with this instance.", +"type": "string" +} +}, +"type": "object" +}, +"InstanceSelection": { +"description": "Defines machines types and a rank to which the machines types belong.", +"id": "InstanceSelection", +"properties": { +"machineTypes": { +"description": "Optional. Full machine-type names, e.g. \"n1-standard-16\".", +"items": { +"type": "string" +}, +"type": "array" +}, +"rank": { +"description": "Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"InstanceSelectionResult": { +"description": "Defines a mapping from machine types to the number of VMs that are created with each machine type.", +"id": "InstanceSelectionResult", +"properties": { +"machineType": { +"description": "Output only. Full machine-type names, e.g. \"n1-standard-16\".", +"readOnly": true, +"type": "string" +}, +"vmCount": { +"description": "Output only. 
Number of VM provisioned with the machine_type.", +"format": "int32", +"readOnly": true, +"type": "integer" +} +}, +"type": "object" +}, +"InstantiateWorkflowTemplateRequest": { +"description": "A request to instantiate a workflow template.", +"id": "InstantiateWorkflowTemplateRequest", +"properties": { +"parameters": { +"additionalProperties": { +"type": "string" +}, +"description": "Optional. Map from parameter names to values that should be used for those parameters. Values may not exceed 1000 characters.", +"type": "object" +}, +"requestId": { +"description": "Optional. A tag that prevents multiple concurrent workflow instances with the same tag from running. This mitigates risk of concurrent instances started due to retries.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"type": "string" +}, +"version": { +"description": "Optional. The version of workflow template to instantiate. If specified, the workflow will be instantiated only if the current version of the workflow template has the supplied version.This option cannot be used to instantiate a previous version of workflow template.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"Interval": { +"description": "Represents a time interval, encoded as a Timestamp start (inclusive) and a Timestamp end (exclusive).The start must be less than or equal to the end. When the start equals the end, the interval is empty (matches no time). When both start and end are unspecified, the interval matches any time.", +"id": "Interval", +"properties": { +"endTime": { +"description": "Optional. Exclusive end of the interval.If specified, a Timestamp matching this interval will have to be before the end.", +"format": "google-datetime", +"type": "string" +}, +"startTime": { +"description": "Optional. 
Inclusive start of the interval.If specified, a Timestamp matching this interval will have to be the same or after the start.", +"format": "google-datetime", +"type": "string" +} +}, +"type": "object" +}, +"Job": { +"description": "A Dataproc job resource.", +"id": "Job", +"properties": { +"done": { +"description": "Output only. Indicates whether the job is completed. If the value is false, the job is still in progress. If true, the job is completed, and status.state field will indicate if it was successful, failed, or cancelled.", +"readOnly": true, +"type": "boolean" +}, +"driverControlFilesUri": { +"description": "Output only. If present, the location of miscellaneous control files which can be used as part of job setup and handling. If not present, control files might be placed in the same location as driver_output_uri.", +"readOnly": true, +"type": "string" +}, +"driverOutputResourceUri": { +"description": "Output only. A URI pointing to the location of the stdout of the job's driver program.", +"readOnly": true, +"type": "string" +}, +"driverSchedulingConfig": { +"$ref": "DriverSchedulingConfig", +"description": "Optional. Driver scheduling configuration." +}, +"flinkJob": { +"$ref": "FlinkJob", +"description": "Optional. Job is a Flink job." +}, +"hadoopJob": { +"$ref": "HadoopJob", +"description": "Optional. Job is a Hadoop job." +}, +"hiveJob": { +"$ref": "HiveJob", +"description": "Optional. Job is a Hive job." +}, +"jobUuid": { +"description": "Output only. A UUID that uniquely identifies a job within the project over time. This is in contrast to a user-settable reference.job_id that might be reused over time.", +"readOnly": true, "type": "string" }, "labels": { "additionalProperties": { "type": "string" }, -"description": "Output only. Labels associated with the operation.", +"description": "Optional. The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). 
Label values can be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.", +"type": "object" +}, +"pigJob": { +"$ref": "PigJob", +"description": "Optional. Job is a Pig job." +}, +"placement": { +"$ref": "JobPlacement", +"description": "Required. Job information, including how, when, and where to run the job." +}, +"prestoJob": { +"$ref": "PrestoJob", +"description": "Optional. Job is a Presto job." +}, +"pysparkJob": { +"$ref": "PySparkJob", +"description": "Optional. Job is a PySpark job." +}, +"reference": { +"$ref": "JobReference", +"description": "Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id." +}, +"scheduling": { +"$ref": "JobScheduling", +"description": "Optional. Job scheduling configuration." +}, +"sparkJob": { +"$ref": "SparkJob", +"description": "Optional. Job is a Spark job." +}, +"sparkRJob": { +"$ref": "SparkRJob", +"description": "Optional. Job is a SparkR job." +}, +"sparkSqlJob": { +"$ref": "SparkSqlJob", +"description": "Optional. Job is a SparkSql job." +}, +"status": { +"$ref": "JobStatus", +"description": "Output only. The job status. Additional application-specific status information might be contained in the type_job and yarn_applications fields.", +"readOnly": true +}, +"statusHistory": { +"description": "Output only. The previous job status.", +"items": { +"$ref": "JobStatus" +}, +"readOnly": true, +"type": "array" +}, +"trinoJob": { +"$ref": "TrinoJob", +"description": "Optional. Job is a Trino job." +}, +"yarnApplications": { +"description": "Output only. The collection of YARN applications spun up by this job.Beta Feature: This report is available for testing purposes only. 
It might be changed before final release.", +"items": { +"$ref": "YarnApplication" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"JobData": { +"description": "Data corresponding to a spark job.", +"id": "JobData", +"properties": { +"completionTime": { +"format": "google-datetime", +"type": "string" +}, +"description": { +"type": "string" +}, +"jobGroup": { +"type": "string" +}, +"jobId": { +"format": "int64", +"type": "string" +}, +"killTasksSummary": { +"additionalProperties": { +"format": "int32", +"type": "integer" +}, +"type": "object" +}, +"name": { +"type": "string" +}, +"numActiveStages": { +"format": "int32", +"type": "integer" +}, +"numActiveTasks": { +"format": "int32", +"type": "integer" +}, +"numCompletedIndices": { +"format": "int32", +"type": "integer" +}, +"numCompletedStages": { +"format": "int32", +"type": "integer" +}, +"numCompletedTasks": { +"format": "int32", +"type": "integer" +}, +"numFailedStages": { +"format": "int32", +"type": "integer" +}, +"numFailedTasks": { +"format": "int32", +"type": "integer" +}, +"numKilledTasks": { +"format": "int32", +"type": "integer" +}, +"numSkippedStages": { +"format": "int32", +"type": "integer" +}, +"numSkippedTasks": { +"format": "int32", +"type": "integer" +}, +"numTasks": { +"format": "int32", +"type": "integer" +}, +"skippedStages": { +"items": { +"format": "int32", +"type": "integer" +}, +"type": "array" +}, +"sqlExecutionId": { +"format": "int64", +"type": "string" +}, +"stageIds": { +"items": { +"format": "int64", +"type": "string" +}, +"type": "array" +}, +"status": { +"enum": [ +"JOB_EXECUTION_STATUS_UNSPECIFIED", +"JOB_EXECUTION_STATUS_RUNNING", +"JOB_EXECUTION_STATUS_SUCCEEDED", +"JOB_EXECUTION_STATUS_FAILED", +"JOB_EXECUTION_STATUS_UNKNOWN" +], +"enumDescriptions": [ +"", +"", +"", +"", +"" +], +"type": "string" +}, +"submissionTime": { +"format": "google-datetime", +"type": "string" +} +}, +"type": "object" +}, +"JobMetadata": { +"description": "Job Operation metadata.", 
+"id": "JobMetadata", +"properties": { +"jobId": { +"description": "Output only. The job id.", +"readOnly": true, +"type": "string" +}, +"operationType": { +"description": "Output only. Operation type.", +"readOnly": true, +"type": "string" +}, +"startTime": { +"description": "Output only. Job submission time.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"status": { +"$ref": "JobStatus", +"description": "Output only. Most recent job status.", +"readOnly": true +} +}, +"type": "object" +}, +"JobPlacement": { +"description": "Dataproc job config.", +"id": "JobPlacement", +"properties": { +"clusterLabels": { +"additionalProperties": { +"type": "string" +}, +"description": "Optional. Cluster labels to identify a cluster where the job will be submitted.", +"type": "object" +}, +"clusterName": { +"description": "Required. The name of the cluster where the job will be submitted.", +"type": "string" +}, +"clusterUuid": { +"description": "Output only. A cluster UUID generated by the Dataproc service when the job is submitted.", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"JobReference": { +"description": "Encapsulates the full scoping used to reference a job.", +"id": "JobReference", +"properties": { +"jobId": { +"description": "Optional. The job ID, which must be unique within the project.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.If not specified by the caller, the job ID will be provided by the server.", +"type": "string" +}, +"projectId": { +"description": "Optional. The ID of the Google Cloud Platform project that the job belongs to. If specified, must match the request project ID.", +"type": "string" +} +}, +"type": "object" +}, +"JobScheduling": { +"description": "Job scheduling options.", +"id": "JobScheduling", +"properties": { +"maxFailuresPerHour": { +"description": "Optional. 
Maximum number of times per hour a driver can be restarted as a result of driver exiting with non-zero code before job is reported failed.A job might be reported as thrashing if the driver exits with a non-zero code four times within a 10-minute window.Maximum value is 10.Note: This restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).", +"format": "int32", +"type": "integer" +}, +"maxFailuresTotal": { +"description": "Optional. Maximum total number of times a driver can be restarted as a result of the driver exiting with a non-zero code. After the maximum number is reached, the job will be reported as failed.Maximum value is 240.Note: Currently, this restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"JobStatus": { +"description": "Dataproc job status.", +"id": "JobStatus", +"properties": { +"details": { +"description": "Optional. Output only. Job state details, such as an error description if the state is ERROR.", +"readOnly": true, +"type": "string" +}, +"state": { +"description": "Output only. 
A state message specifying the overall job state.", +"enum": [ +"STATE_UNSPECIFIED", +"PENDING", +"SETUP_DONE", +"RUNNING", +"CANCEL_PENDING", +"CANCEL_STARTED", +"CANCELLED", +"DONE", +"ERROR", +"ATTEMPT_FAILURE" +], +"enumDescriptions": [ +"The job state is unknown.", +"The job is pending; it has been submitted, but is not yet running.", +"Job has been received by the service and completed initial setup; it will soon be submitted to the cluster.", +"The job is running on the cluster.", +"A CancelJob request has been received, but is pending.", +"Transient in-flight resources have been canceled, and the request to cancel the running job has been issued to the cluster.", +"The job cancellation was successful.", +"The job has completed successfully.", +"The job has completed, but encountered an error.", +"Job attempt has failed. The detail field contains failure details for this attempt.Applies to restartable jobs only." +], "readOnly": true, -"type": "object" -}, -"warnings": { -"description": "Output only. Warnings encountered during operation execution.", -"items": { "type": "string" }, +"stateStartTime": { +"description": "Output only. The time when this state was entered.", +"format": "google-datetime", "readOnly": true, -"type": "array" -} -}, -"type": "object" +"type": "string" }, -"AutoscalingConfig": { -"description": "Autoscaling Policy config associated with the cluster.", -"id": "AutoscalingConfig", -"properties": { -"policyUri": { -"description": "Optional. The autoscaling policy used by the cluster.Only resource names including projectid and location (region) are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id] projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]Note that the policy must be in the same project and Dataproc region.", +"substate": { +"description": "Output only. 
Additional state information, which includes status reported by the agent.", +"enum": [ +"UNSPECIFIED", +"SUBMITTED", +"QUEUED", +"STALE_STATUS" +], +"enumDescriptions": [ +"The job substate is unknown.", +"The Job is submitted to the agent.Applies to RUNNING state.", +"The Job has been received and is awaiting execution (it might be waiting for a condition to be met). See the \"details\" field for the reason for the delay.Applies to RUNNING state.", +"The agent-reported status is out of date, which can be caused by a loss of communication between the agent and Dataproc. If the agent does not send a timely update, the job will fail.Applies to RUNNING state." +], +"readOnly": true, "type": "string" } }, "type": "object" }, -"AutoscalingPolicy": { -"description": "Describes an autoscaling policy for Dataproc cluster autoscaler.", -"id": "AutoscalingPolicy", +"JobsSummary": { +"description": "Data related to Jobs page summary", +"id": "JobsSummary", "properties": { -"basicAlgorithm": { -"$ref": "BasicAutoscalingAlgorithm" +"activeJobs": { +"description": "Number of active jobs", +"format": "int32", +"type": "integer" }, -"id": { -"description": "Required. The policy id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.", +"applicationId": { +"description": "Spark Application Id", "type": "string" }, -"labels": { -"additionalProperties": { -"type": "string" +"attempts": { +"description": "Attempts info", +"items": { +"$ref": "ApplicationAttemptInfo" }, -"description": "Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). 
No more than 32 labels can be associated with an autoscaling policy.", -"type": "object" +"type": "array" }, -"name": { -"description": "Output only. The \"resource name\" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}", -"readOnly": true, -"type": "string" +"completedJobs": { +"description": "Number of completed jobs", +"format": "int32", +"type": "integer" }, -"secondaryWorkerConfig": { -"$ref": "InstanceGroupAutoscalingPolicyConfig", -"description": "Optional. Describes how the autoscaler will operate for secondary workers." +"failedJobs": { +"description": "Number of failed jobs", +"format": "int32", +"type": "integer" }, -"workerConfig": { -"$ref": "InstanceGroupAutoscalingPolicyConfig", -"description": "Required. Describes how the autoscaler will operate for primary workers." +"schedulingMode": { +"description": "Spark Scheduling mode", +"type": "string" } }, "type": "object" }, -"AutotuningConfig": { -"description": "Autotuning configuration of the workload.", -"id": "AutotuningConfig", +"JupyterConfig": { +"description": "Jupyter configuration for an interactive session.", +"id": "JupyterConfig", "properties": { -"scenarios": { -"description": "Optional. Scenarios for which tunings are applied.", -"items": { +"displayName": { +"description": "Optional. Display name, shown in the Jupyter kernelspec card.", +"type": "string" +}, +"kernel": { +"description": "Optional. 
Kernel", "enum": [ -"SCENARIO_UNSPECIFIED", -"SCALING", -"BROADCAST_HASH_JOIN", -"MEMORY" +"KERNEL_UNSPECIFIED", +"PYTHON", +"SCALA" ], "enumDescriptions": [ -"Default value.", -"Scaling recommendations such as initialExecutors.", -"Adding hints for potential relation broadcasts.", -"Memory management for workloads." +"The kernel is unknown.", +"Python kernel.", +"Scala kernel." ], "type": "string" -}, -"type": "array" } }, "type": "object" }, -"AuxiliaryNodeGroup": { -"description": "Node group identification and configuration information.", -"id": "AuxiliaryNodeGroup", +"KerberosConfig": { +"description": "Specifies Kerberos related configuration.", +"id": "KerberosConfig", "properties": { -"nodeGroup": { -"$ref": "NodeGroup", -"description": "Required. Node group configuration." +"crossRealmTrustAdminServer": { +"description": "Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", +"type": "string" }, -"nodeGroupId": { -"description": "Optional. A node group ID. Generated if not specified.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of from 3 to 33 characters.", +"crossRealmTrustKdc": { +"description": "Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", "type": "string" -} }, -"type": "object" +"crossRealmTrustRealm": { +"description": "Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.", +"type": "string" }, -"AuxiliaryServicesConfig": { -"description": "Auxiliary services configuration for a Cluster.", -"id": "AuxiliaryServicesConfig", -"properties": { -"metastoreConfig": { -"$ref": "MetastoreConfig", -"description": "Optional. The Hive Metastore configuration for this workload." +"crossRealmTrustSharedPasswordUri": { +"description": "Optional. 
The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.", +"type": "string" }, -"sparkHistoryServerConfig": { -"$ref": "SparkHistoryServerConfig", -"description": "Optional. The Spark History Server configuration for the workload." +"enableKerberos": { +"description": "Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.", +"type": "boolean" +}, +"kdcDbKeyUri": { +"description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.", +"type": "string" +}, +"keyPasswordUri": { +"description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.", +"type": "string" +}, +"keystorePasswordUri": { +"description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.", +"type": "string" +}, +"keystoreUri": { +"description": "Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", +"type": "string" +}, +"kmsKeyUri": { +"description": "Optional. The URI of the KMS key used to encrypt sensitive files.", +"type": "string" +}, +"realm": { +"description": "Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.", +"type": "string" +}, +"rootPrincipalPasswordUri": { +"description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.", +"type": "string" +}, +"tgtLifetimeHours": { +"description": "Optional. The lifetime of the ticket granting ticket, in hours. 
If not specified, or user specifies 0, then default value 10 will be used.", +"format": "int32", +"type": "integer" +}, +"truststorePasswordUri": { +"description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.", +"type": "string" +}, +"truststoreUri": { +"description": "Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", +"type": "string" } }, "type": "object" }, -"BasicAutoscalingAlgorithm": { -"description": "Basic algorithm for autoscaling.", -"id": "BasicAutoscalingAlgorithm", +"KubernetesClusterConfig": { +"description": "The configuration for running the Dataproc cluster on Kubernetes.", +"id": "KubernetesClusterConfig", "properties": { -"cooldownPeriod": { -"description": "Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed.Bounds: 2m, 1d. Default: 2m.", -"format": "google-duration", -"type": "string" +"gkeClusterConfig": { +"$ref": "GkeClusterConfig", +"description": "Required. The configuration for running the Dataproc cluster on GKE." }, -"sparkStandaloneConfig": { -"$ref": "SparkStandaloneAutoscalingConfig", -"description": "Optional. Spark Standalone autoscaling configuration" +"kubernetesNamespace": { +"description": "Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.", +"type": "string" }, -"yarnConfig": { -"$ref": "BasicYarnAutoscalingConfig", -"description": "Optional. YARN autoscaling configuration." +"kubernetesSoftwareConfig": { +"$ref": "KubernetesSoftwareConfig", +"description": "Optional. 
The software configuration for this Dataproc cluster running on Kubernetes." } }, "type": "object" }, -"BasicYarnAutoscalingConfig": { -"description": "Basic autoscaling configurations for YARN.", -"id": "BasicYarnAutoscalingConfig", +"KubernetesSoftwareConfig": { +"description": "The software configuration for this Dataproc cluster running on Kubernetes.", +"id": "KubernetesSoftwareConfig", "properties": { -"gracefulDecommissionTimeout": { -"description": "Required. Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations.Bounds: 0s, 1d.", -"format": "google-duration", +"componentVersion": { +"additionalProperties": { "type": "string" }, -"scaleDownFactor": { -"description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.", -"format": "double", -"type": "number" -}, -"scaleDownMinWorkerFraction": { -"description": "Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.", -"format": "double", -"type": "number" +"description": "The components that should be installed in this Dataproc cluster. 
The key must be a string from the KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified.", +"type": "object" }, -"scaleUpFactor": { -"description": "Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information.Bounds: 0.0, 1.0.", -"format": "double", -"type": "number" +"properties": { +"additionalProperties": { +"type": "string" }, -"scaleUpMinWorkerFraction": { -"description": "Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.", -"format": "double", -"type": "number" +"description": "The properties to set on daemon config files.Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image. The following are supported prefixes and their mappings: spark: spark-defaults.confFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", +"type": "object" } }, "type": "object" }, -"Batch": { -"description": "A representation of a batch workload in the service.", -"id": "Batch", +"LifecycleConfig": { +"description": "Specifies the cluster auto-delete schedule configuration.", +"id": "LifecycleConfig", "properties": { -"createTime": { -"description": "Output only. 
The time when the batch was created.", +"autoDeleteTime": { +"description": "Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).", "format": "google-datetime", -"readOnly": true, "type": "string" }, -"creator": { -"description": "Output only. The email address of the user who created the batch.", -"readOnly": true, +"autoDeleteTtl": { +"description": "Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).", +"format": "google-duration", "type": "string" }, -"environmentConfig": { -"$ref": "EnvironmentConfig", -"description": "Optional. Environment configuration for the batch execution." +"idleDeleteTtl": { +"description": "Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).", +"format": "google-duration", +"type": "string" }, -"labels": { -"additionalProperties": { +"idleStartTime": { +"description": "Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).", +"format": "google-datetime", +"readOnly": true, "type": "string" +} }, -"description": "Optional. The labels to associate with this batch. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). 
Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a batch.", "type": "object" }, -"name": { -"description": "Output only. The resource name of the batch.", +"ListAutoscalingPoliciesResponse": { +"description": "A response to a request to list autoscaling policies in a project.", +"id": "ListAutoscalingPoliciesResponse", +"properties": { +"nextPageToken": { +"description": "Output only. This token is included in the response if there are more results to fetch.", "readOnly": true, "type": "string" }, -"operation": { -"description": "Output only. The resource name of the operation associated with this batch.", -"readOnly": true, -"type": "string" +"policies": { +"description": "Output only. Autoscaling policies list.", +"items": { +"$ref": "AutoscalingPolicy" }, -"pysparkBatch": { -"$ref": "PySparkBatch", -"description": "Optional. PySpark batch config." +"readOnly": true, +"type": "array" +} }, -"runtimeConfig": { -"$ref": "RuntimeConfig", -"description": "Optional. Runtime configuration for the batch execution." +"type": "object" }, -"runtimeInfo": { -"$ref": "RuntimeInfo", -"description": "Output only. Runtime information about batch execution.", -"readOnly": true +"ListBatchesResponse": { +"description": "A list of batch workloads.", +"id": "ListBatchesResponse", +"properties": { +"batches": { +"description": "Output only. The batches from the specified collection.", +"items": { +"$ref": "Batch" }, -"sparkBatch": { -"$ref": "SparkBatch", -"description": "Optional. Spark batch config." +"readOnly": true, +"type": "array" }, -"sparkRBatch": { -"$ref": "SparkRBatch", -"description": "Optional. SparkR batch config." +"nextPageToken": { +"description": "A token, which can be sent as page_token to retrieve the next page. 
If this field is omitted, there are no subsequent pages.", +"type": "string" }, -"sparkSqlBatch": { -"$ref": "SparkSqlBatch", -"description": "Optional. SparkSql batch config." +"unreachable": { +"description": "Output only. List of Batches that could not be included in the response. Attempting to get one of these resources may indicate why it was not included in the list response.", +"items": { +"type": "string" }, -"state": { -"description": "Output only. The state of the batch.", -"enum": [ -"STATE_UNSPECIFIED", -"PENDING", -"RUNNING", -"CANCELLING", -"CANCELLED", -"SUCCEEDED", -"FAILED" -], -"enumDescriptions": [ -"The batch state is unknown.", -"The batch is created before running.", -"The batch is running.", -"The batch is cancelling.", -"The batch cancellation was successful.", -"The batch completed successfully.", -"The batch is no longer running due to an error." -], "readOnly": true, -"type": "string" +"type": "array" +} }, -"stateHistory": { -"description": "Output only. Historical state information for the batch.", +"type": "object" +}, +"ListClustersResponse": { +"description": "The list of all clusters in a project.", +"id": "ListClustersResponse", +"properties": { +"clusters": { +"description": "Output only. The clusters in the project.", "items": { -"$ref": "StateHistory" +"$ref": "Cluster" }, "readOnly": true, "type": "array" }, -"stateMessage": { -"description": "Output only. Batch state details, such as a failure description if the state is FAILED.", +"nextPageToken": { +"description": "Output only. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListClustersRequest.", "readOnly": true, "type": "string" +} +}, +"type": "object" +}, +"ListJobsResponse": { +"description": "A list of jobs in a project.", +"id": "ListJobsResponse", +"properties": { +"jobs": { +"description": "Output only. 
Jobs list.", +"items": { +"$ref": "Job" }, -"stateTime": { -"description": "Output only. The time when the batch entered a current state.", -"format": "google-datetime", "readOnly": true, +"type": "array" +}, +"nextPageToken": { +"description": "Optional. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListJobsRequest.", "type": "string" }, -"uuid": { -"description": "Output only. A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.", -"readOnly": true, +"unreachable": { +"description": "Output only. List of jobs with kms_key-encrypted parameters that could not be decrypted. A response to a jobs.get request may indicate the reason for the decryption failure for a specific job.", +"items": { "type": "string" +}, +"readOnly": true, +"type": "array" } }, "type": "object" }, -"BatchOperationMetadata": { -"description": "Metadata describing the Batch operation.", -"id": "BatchOperationMetadata", +"ListOperationsResponse": { +"description": "The response message for Operations.ListOperations.", +"id": "ListOperationsResponse", "properties": { -"batch": { -"description": "Name of the batch for the operation.", +"nextPageToken": { +"description": "The standard List next-page token.", "type": "string" }, -"batchUuid": { -"description": "Batch UUID for the operation.", -"type": "string" +"operations": { +"description": "A list of operations that matches the specified filter in the request.", +"items": { +"$ref": "Operation" }, -"createTime": { -"description": "The time when the operation was created.", -"format": "google-datetime", -"type": "string" +"type": "array" +} }, -"description": { -"description": "Short description of the operation.", -"type": "string" +"type": "object" }, -"doneTime": { -"description": "The time when the operation finished.", -"format": "google-datetime", +"ListSessionTemplatesResponse": { 
+"description": "A list of session templates.", +"id": "ListSessionTemplatesResponse", +"properties": { +"nextPageToken": { +"description": "A token, which can be sent as page_token to retrieve the next page. If this field is omitted, there are no subsequent pages.", "type": "string" }, -"labels": { -"additionalProperties": { -"type": "string" +"sessionTemplates": { +"description": "Output only. Session template list", +"items": { +"$ref": "SessionTemplate" +}, +"readOnly": true, +"type": "array" +} }, -"description": "Labels associated with the operation.", "type": "object" }, -"operationType": { -"description": "The operation type.", -"enum": [ -"BATCH_OPERATION_TYPE_UNSPECIFIED", -"BATCH" -], -"enumDescriptions": [ -"Batch operation type is unknown.", -"Batch operation type." -], +"ListSessionsResponse": { +"description": "A list of interactive sessions.", +"id": "ListSessionsResponse", +"properties": { +"nextPageToken": { +"description": "A token, which can be sent as page_token, to retrieve the next page. If this field is omitted, there are no subsequent pages.", "type": "string" }, -"warnings": { -"description": "Warnings encountered during operation execution.", +"sessions": { +"description": "Output only. The sessions from the specified collection.", "items": { -"type": "string" +"$ref": "Session" }, +"readOnly": true, "type": "array" } }, "type": "object" }, -"Binding": { -"description": "Associates members, or principals, with a role.", -"id": "Binding", +"ListWorkflowTemplatesResponse": { +"description": "A response to a request to list workflow templates in a project.", +"id": "ListWorkflowTemplatesResponse", "properties": { -"condition": { -"$ref": "Expr", -"description": "The condition that is associated with this binding.If the condition evaluates to true, then this binding applies to the current request.If the condition evaluates to false, then this binding does not apply to the current request. 
However, a different role binding might grant the same role to one or more of the principals in this binding.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies)." +"nextPageToken": { +"description": "Output only. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListWorkflowTemplatesRequest.", +"readOnly": true, +"type": "string" }, -"members": { -"description": "Specifies the principals requesting access for a Google Cloud resource. members can have the following values: allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account. allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. user:{emailid}: An email address that represents a specific Google account. For example, alice@example.com . serviceAccount:{emailid}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com. serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa]. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com. principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}: A single identity in a workforce identity pool. 
principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}: All workforce identities in a group. principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}: All workforce identities with a specific attribute value. principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*: All identities in a workforce identity pool. principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}: A single identity in a workload identity pool. principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}: A workload identity pool group. principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}: All identities in a workload identity pool with a certain attribute. principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*: All identities in a workload identity pool. deleted:user:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid} and the recovered user retains the role in the binding. deleted:serviceAccount:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid} and the undeleted service account retains the role in the binding. deleted:group:{emailid}?uid={uniqueid}: An email address (plus unique identifier) representing a Google group that has been recently deleted. 
For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid} and the recovered group retains the role in the binding. deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}: Deleted single identity in a workforce identity pool. For example, deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value.", +"templates": { +"description": "Output only. WorkflowTemplates list.", +"items": { +"$ref": "WorkflowTemplate" +}, +"readOnly": true, +"type": "array" +}, +"unreachable": { +"description": "Output only. List of workflow templates that could not be included in the response. Attempting to get one of these resources may indicate why it was not included in the list response.", "items": { "type": "string" }, +"readOnly": true, "type": "array" +} }, -"role": { -"description": "Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.For an overview of the IAM roles and permissions, see the IAM documentation (https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see here (https://cloud.google.com/iam/docs/understanding-roles).", +"type": "object" +}, +"LoggingConfig": { +"description": "The runtime logging config of the job.", +"id": "LoggingConfig", +"properties": { +"driverLogLevels": { +"additionalProperties": { +"enum": [ +"LEVEL_UNSPECIFIED", +"ALL", +"TRACE", +"DEBUG", +"INFO", +"WARN", +"ERROR", +"FATAL", +"OFF" +], +"enumDescriptions": [ +"Level is unspecified. Use default level for log4j.", +"Use ALL level for log4j.", +"Use TRACE level for log4j.", +"Use DEBUG level for log4j.", +"Use INFO level for log4j.", +"Use WARN level for log4j.", +"Use ERROR level for log4j.", +"Use FATAL level for log4j.", +"Turn off log4j." 
+], "type": "string" -} }, +"description": "The per-package log levels for the driver. This can include \"root\" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG'", "type": "object" +} }, -"CancelJobRequest": { -"description": "A request to cancel a job.", -"id": "CancelJobRequest", -"properties": {}, "type": "object" }, -"Cluster": { -"description": "Describes the identifying information, config, and status of a Dataproc cluster", -"id": "Cluster", +"ManagedCluster": { +"description": "Cluster that is managed by the workflow.", +"id": "ManagedCluster", "properties": { "clusterName": { -"description": "Required. The cluster name, which must be unique within a project. The name must start with a lowercase letter, and can contain up to 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. The name of a deleted cluster can be reused.", -"type": "string" -}, -"clusterUuid": { -"description": "Output only. A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster.", -"readOnly": true, +"description": "Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix.The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.", "type": "string" }, "config": { "$ref": "ClusterConfig", -"description": "Optional. The cluster config for a cluster of Compute Engine Instances. Note that Dataproc may set default values, and values may change when clusters are updated.Exactly one of ClusterConfig or VirtualClusterConfig must be specified." +"description": "Required. The cluster configuration." }, "labels": { "additionalProperties": { "type": "string" }, -"description": "Optional. The labels to associate with this cluster. 
Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.", +"description": "Optional. The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}\\p{N}_-{0,63}No more than 32 labels can be associated with a given cluster.", "type": "object" +} }, -"metrics": { -"$ref": "ClusterMetrics", -"description": "Output only. Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release.", -"readOnly": true +"type": "object" }, -"projectId": { -"description": "Required. The Google Cloud Platform project ID that the cluster belongs to.", +"ManagedGroupConfig": { +"description": "Specifies the resources used to actively manage an instance group.", +"id": "ManagedGroupConfig", +"properties": { +"instanceGroupManagerName": { +"description": "Output only. The name of the Instance Group Manager for this group.", +"readOnly": true, "type": "string" }, -"status": { -"$ref": "ClusterStatus", -"description": "Output only. Cluster status.", -"readOnly": true -}, -"statusHistory": { -"description": "Output only. The previous cluster status.", -"items": { -"$ref": "ClusterStatus" -}, +"instanceGroupManagerUri": { +"description": "Output only. The partial URI to the instance group manager for this group. E.g. 
projects/my-project/regions/us-central1/instanceGroupManagers/my-igm.", "readOnly": true, -"type": "array" +"type": "string" }, -"virtualClusterConfig": { -"$ref": "VirtualClusterConfig", -"description": "Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified." +"instanceTemplateName": { +"description": "Output only. The name of the Instance Template used for the Managed Instance Group.", +"readOnly": true, +"type": "string" } }, "type": "object" }, -"ClusterConfig": { -"description": "The cluster config.", -"id": "ClusterConfig", +"MemoryMetrics": { +"id": "MemoryMetrics", "properties": { -"autoscalingConfig": { -"$ref": "AutoscalingConfig", -"description": "Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset." -}, -"auxiliaryNodeGroups": { -"description": "Optional. The node group settings.", -"items": { -"$ref": "AuxiliaryNodeGroup" -}, -"type": "array" +"totalOffHeapStorageMemory": { +"format": "int64", +"type": "string" }, -"configBucket": { -"description": "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... 
URI to a Cloud Storage bucket.", +"totalOnHeapStorageMemory": { +"format": "int64", "type": "string" }, -"dataprocMetricConfig": { -"$ref": "DataprocMetricConfig", -"description": "Optional. The config for Dataproc metrics." +"usedOffHeapStorageMemory": { +"format": "int64", +"type": "string" }, -"encryptionConfig": { -"$ref": "EncryptionConfig", -"description": "Optional. Encryption settings for the cluster." +"usedOnHeapStorageMemory": { +"format": "int64", +"type": "string" +} }, -"endpointConfig": { -"$ref": "EndpointConfig", -"description": "Optional. Port/endpoint configuration for this cluster" +"type": "object" }, -"gceClusterConfig": { -"$ref": "GceClusterConfig", -"description": "Optional. The shared Compute Engine config settings for all instances in a cluster." +"MetastoreConfig": { +"description": "Specifies a Metastore configuration.", +"id": "MetastoreConfig", +"properties": { +"dataprocMetastoreService": { +"description": "Required. Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[dataproc_region]/services/[service-name]", +"type": "string" +} }, -"gkeClusterConfig": { -"$ref": "GkeClusterConfig", -"deprecated": true, -"description": "Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. These config settings are mutually exclusive with Compute Engine-based options, such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config." +"type": "object" }, -"initializationActions": { -"description": "Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. 
You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi ", +"Metric": { +"description": "A Dataproc custom metric.", +"id": "Metric", +"properties": { +"metricOverrides": { +"description": "Optional. Specify one or more Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) to collect for the metric course (for the SPARK metric source (any Spark metric (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be specified).Provide metrics in the following format: METRIC_SOURCE: INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples: yarn:ResourceManager:QueueMetrics:AppsCompleted spark:driver:DAGScheduler:job.allJobs sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified overridden metrics are collected for the metric source. For example, if one or more spark:executive metrics are listed as metric overrides, other SPARK metrics are not collected. The collection of the metrics for other enabled custom metric sources is unaffected. For example, if both SPARK andd YARN metric sources are enabled, and overrides are provided for Spark metrics only, all YARN metrics are collected.", "items": { -"$ref": "NodeInitializationAction" +"type": "string" }, "type": "array" }, -"lifecycleConfig": { -"$ref": "LifecycleConfig", -"description": "Optional. Lifecycle setting for the cluster." -}, -"masterConfig": { -"$ref": "InstanceGroupConfig", -"description": "Optional. The Compute Engine config settings for the cluster's master instance." -}, -"metastoreConfig": { -"$ref": "MetastoreConfig", -"description": "Optional. Metastore configuration." 
-}, -"secondaryWorkerConfig": { -"$ref": "InstanceGroupConfig", -"description": "Optional. The Compute Engine config settings for a cluster's secondary worker instances" -}, -"securityConfig": { -"$ref": "SecurityConfig", -"description": "Optional. Security settings for the cluster." -}, -"softwareConfig": { -"$ref": "SoftwareConfig", -"description": "Optional. The config settings for cluster software." -}, -"tempBucket": { -"description": "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", +"metricSource": { +"description": "Required. A standard set of metrics is collected unless metricOverrides are specified for the metric source (see Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) for more information).", +"enum": [ +"METRIC_SOURCE_UNSPECIFIED", +"MONITORING_AGENT_DEFAULTS", +"HDFS", +"SPARK", +"YARN", +"SPARK_HISTORY_SERVER", +"HIVESERVER2", +"HIVEMETASTORE", +"FLINK" +], +"enumDescriptions": [ +"Required unspecified metric source.", +"Monitoring agent metrics. 
If this source is enabled, Dataproc enables the monitoring agent in Compute Engine, and collects monitoring agent metrics, which are published with an agent.googleapis.com prefix.", +"HDFS metric source.", +"Spark metric source.", +"YARN metric source.", +"Spark History Server metric source.", +"Hiveserver2 metric source.", +"hivemetastore metric source", +"flink metric source" +], "type": "string" -}, -"workerConfig": { -"$ref": "InstanceGroupConfig", -"description": "Optional. The Compute Engine config settings for the cluster's worker instances." } }, "type": "object" }, -"ClusterMetrics": { -"description": "Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release.", -"id": "ClusterMetrics", +"NamespacedGkeDeploymentTarget": { +"deprecated": true, +"description": "Deprecated. Used only for the deprecated beta. A full, namespace-isolated deployment target for an existing GKE cluster.", +"id": "NamespacedGkeDeploymentTarget", "properties": { -"hdfsMetrics": { -"additionalProperties": { -"format": "int64", +"clusterNamespace": { +"description": "Optional. A namespace within the GKE cluster to deploy into.", "type": "string" }, -"description": "The HDFS metrics.", +"targetGkeCluster": { +"description": "Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'", +"type": "string" +} +}, "type": "object" }, -"yarnMetrics": { +"NodeGroup": { +"description": "Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource.", +"id": "NodeGroup", +"properties": { +"labels": { "additionalProperties": { -"format": "int64", "type": "string" }, -"description": "YARN metrics.", +"description": "Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. 
If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). The node group must have no more than 32 labels.", "type": "object" -} }, -"type": "object" +"name": { +"description": "The Node group resource name (https://aip.dev/122).", +"type": "string" }, -"ClusterOperation": { -"description": "The cluster operation triggered by a workflow.", -"id": "ClusterOperation", -"properties": { -"done": { -"description": "Output only. Indicates the operation is done.", -"readOnly": true, -"type": "boolean" +"nodeGroupConfig": { +"$ref": "InstanceGroupConfig", +"description": "Optional. The node group instance group configuration." }, -"error": { -"description": "Output only. Error, if operation failed.", -"readOnly": true, +"roles": { +"description": "Required. Node group roles.", +"items": { +"enum": [ +"ROLE_UNSPECIFIED", +"DRIVER" +], +"enumDescriptions": [ +"Required unspecified role.", +"Job drivers run on the node pool." +], "type": "string" }, -"operationId": { -"description": "Output only. The id of the cluster operation.", -"readOnly": true, -"type": "string" +"type": "array" } }, "type": "object" }, -"ClusterOperationMetadata": { -"description": "Metadata describing the operation.", -"id": "ClusterOperationMetadata", +"NodeGroupAffinity": { +"description": "Node Group Affinity for clusters using sole-tenant node groups. The Dataproc NodeGroupAffinity resource is not related to the Dataproc NodeGroup resource.", +"id": "NodeGroupAffinity", "properties": { -"childOperationIds": { -"description": "Output only. Child operation ids", -"items": { +"nodeGroupUri": { +"description": "Required. The URI of a sole-tenant node group resource (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on.A full URL, partial URI, or node group name are valid. 
Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 node-group-1", "type": "string" +} }, -"readOnly": true, -"type": "array" -}, -"clusterName": { -"description": "Output only. Name of the cluster for the operation.", -"readOnly": true, -"type": "string" +"type": "object" }, +"NodeGroupOperationMetadata": { +"description": "Metadata describing the node group operation.", +"id": "NodeGroupOperationMetadata", +"properties": { "clusterUuid": { -"description": "Output only. Cluster UUID for the operation.", +"description": "Output only. Cluster UUID associated with the node group operation.", "readOnly": true, "type": "string" }, @@ -3728,15 +8531,41 @@ "additionalProperties": { "type": "string" }, -"description": "Output only. Labels associated with the operation", +"description": "Output only. Labels associated with the operation.", "readOnly": true, "type": "object" }, -"operationType": { -"description": "Output only. The operation type.", +"nodeGroupId": { +"description": "Output only. Node group ID for the operation.", "readOnly": true, "type": "string" }, +"operationType": { +"description": "The operation type.", +"enum": [ +"NODE_GROUP_OPERATION_TYPE_UNSPECIFIED", +"CREATE", +"UPDATE", +"DELETE", +"RESIZE", +"REPAIR", +"UPDATE_LABELS", +"START", +"STOP" +], +"enumDescriptions": [ +"Node group operation type is unknown.", +"Create node group operation type.", +"Update node group operation type.", +"Delete node group operation type.", +"Resize node group operation type.", +"Repair node group operation type.", +"Update node group label operation type.", +"Start node group operation type.", +"Stop node group operation type." +], +"type": "string" +}, "status": { "$ref": "ClusterOperationStatus", "description": "Output only. 
Current operation status.", @@ -3761,405 +8590,474 @@ }, "type": "object" }, -"ClusterOperationStatus": { -"description": "The status of the operation.", -"id": "ClusterOperationStatus", +"NodeInitializationAction": { +"description": "Specifies an executable to run on a fully configured node and a timeout period for executable completion.", +"id": "NodeInitializationAction", "properties": { -"details": { -"description": "Output only. A message containing any operation metadata details.", -"readOnly": true, +"executableFile": { +"description": "Required. Cloud Storage URI of executable file.", "type": "string" }, -"innerState": { -"description": "Output only. A message containing the detailed operation state.", -"readOnly": true, +"executionTimeout": { +"description": "Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.", +"format": "google-duration", "type": "string" +} }, -"state": { -"description": "Output only. A message containing the operation state.", +"type": "object" +}, +"NodePool": { +"description": "indicating a list of workers of same type", +"id": "NodePool", +"properties": { +"id": { +"description": "Required. A unique id of the node pool. Primary and Secondary workers can be specified using special reserved ids PRIMARY_WORKER_POOL and SECONDARY_WORKER_POOL respectively. Aux node pools can be referenced using corresponding pool id.", +"type": "string" +}, +"instanceNames": { +"description": "Name of instances to be repaired. These instances must belong to specified node pool.", +"items": { +"type": "string" +}, +"type": "array" +}, +"repairAction": { +"description": "Required. 
Repair action to take on specified resources of the node pool.", "enum": [ -"UNKNOWN", -"PENDING", -"RUNNING", -"DONE" +"REPAIR_ACTION_UNSPECIFIED", +"DELETE" ], "enumDescriptions": [ -"Unused.", -"The operation has been created.", -"The operation is running.", -"The operation is done; either cancelled or completed." +"No action will be taken by default.", +"delete the specified list of nodes." ], -"readOnly": true, -"type": "string" -}, -"stateStartTime": { -"description": "Output only. The time this state was entered.", -"format": "google-datetime", -"readOnly": true, "type": "string" } }, "type": "object" }, -"ClusterSelector": { -"description": "A selector that chooses target cluster for jobs based on metadata.", -"id": "ClusterSelector", +"Operation": { +"description": "This resource represents a long-running operation that is the result of a network API call.", +"id": "Operation", "properties": { -"clusterLabels": { +"done": { +"description": "If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.", +"type": "boolean" +}, +"error": { +"$ref": "Status", +"description": "The error result of the operation in case of failure or cancellation." +}, +"metadata": { "additionalProperties": { -"type": "string" +"description": "Properties of the object. Contains field @type with type URL.", +"type": "any" }, -"description": "Required. The cluster labels. Cluster must have all labels to match.", +"description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", "type": "object" }, -"zone": { -"description": "Optional. The zone where workflow process executes. 
This parameter does not affect the selection of the cluster.If unspecified, the zone of the first cluster matching the selector is used.", +"name": { +"description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.", "type": "string" +}, +"response": { +"additionalProperties": { +"description": "Properties of the object. Contains field @type with type URL.", +"type": "any" +}, +"description": "The normal, successful response of the operation. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.", +"type": "object" } }, "type": "object" }, -"ClusterStatus": { -"description": "The status of a cluster and its instances.", -"id": "ClusterStatus", +"OrderedJob": { +"description": "A job executed by the workflow.", +"id": "OrderedJob", "properties": { -"detail": { -"description": "Optional. Output only. Details of cluster's state.", -"readOnly": true, +"flinkJob": { +"$ref": "FlinkJob", +"description": "Optional. Job is a Flink job." +}, +"hadoopJob": { +"$ref": "HadoopJob", +"description": "Optional. Job is a Hadoop job." +}, +"hiveJob": { +"$ref": "HiveJob", +"description": "Optional. Job is a Hive job." +}, +"labels": { +"additionalProperties": { "type": "string" }, -"state": { -"description": "Output only. 
The cluster's state.", -"enum": [ -"UNKNOWN", -"CREATING", -"RUNNING", -"ERROR", -"ERROR_DUE_TO_UPDATE", -"DELETING", -"UPDATING", -"STOPPING", -"STOPPED", -"STARTING", -"REPAIRING" -], -"enumDescriptions": [ -"The cluster state is unknown.", -"The cluster is being created and set up. It is not ready for use.", -"The cluster is currently running and healthy. It is ready for use.Note: The cluster state changes from \"creating\" to \"running\" status after the master node(s), first two primary worker nodes (and the last primary worker node if primary workers > 2) are running.", -"The cluster encountered an error. It is not ready for use.", -"The cluster has encountered an error while being updated. Jobs can be submitted to the cluster, but the cluster cannot be updated.", -"The cluster is being deleted. It cannot be used.", -"The cluster is being updated. It continues to accept and process jobs.", -"The cluster is being stopped. It cannot be used.", -"The cluster is currently stopped. It is not ready for use.", -"The cluster is being started. It is not ready for use.", -"The cluster is being repaired. It is not ready for use." -], -"readOnly": true, +"description": "Optional. The labels to associate with this job.Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}\\p{N}_-{0,63}No more than 32 labels can be associated with a given job.", +"type": "object" +}, +"pigJob": { +"$ref": "PigJob", +"description": "Optional. Job is a Pig job." +}, +"prerequisiteStepIds": { +"description": "Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow.", +"items": { "type": "string" }, -"stateStartTime": { -"description": "Output only. 
Time when this state was entered (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).", -"format": "google-datetime", -"readOnly": true, +"type": "array" +}, +"prestoJob": { +"$ref": "PrestoJob", +"description": "Optional. Job is a Presto job." +}, +"pysparkJob": { +"$ref": "PySparkJob", +"description": "Optional. Job is a PySpark job." +}, +"scheduling": { +"$ref": "JobScheduling", +"description": "Optional. Job scheduling configuration." +}, +"sparkJob": { +"$ref": "SparkJob", +"description": "Optional. Job is a Spark job." +}, +"sparkRJob": { +"$ref": "SparkRJob", +"description": "Optional. Job is a SparkR job." +}, +"sparkSqlJob": { +"$ref": "SparkSqlJob", +"description": "Optional. Job is a SparkSql job." +}, +"stepId": { +"description": "Required. The step id. The id must be unique among all jobs within the template.The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in prerequisiteStepIds field from other steps.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.", +"type": "string" +}, +"trinoJob": { +"$ref": "TrinoJob", +"description": "Optional. Job is a Trino job." +} +}, +"type": "object" +}, +"OutputMetrics": { +"description": "Metrics about the data written by the task.", +"id": "OutputMetrics", +"properties": { +"bytesWritten": { +"format": "int64", "type": "string" }, -"substate": { -"description": "Output only. 
Additional state information that includes status reported by the agent.", -"enum": [ -"UNSPECIFIED", -"UNHEALTHY", -"STALE_STATUS" -], -"enumDescriptions": [ -"The cluster substate is unknown.", -"The cluster is known to be in an unhealthy state (for example, critical daemons are not running or HDFS capacity is exhausted).Applies to RUNNING state.", -"The agent-reported status is out of date (may occur if Dataproc loses communication with Agent).Applies to RUNNING state." -], -"readOnly": true, +"recordsWritten": { +"format": "int64", "type": "string" } }, "type": "object" }, -"ConfidentialInstanceConfig": { -"description": "Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)", -"id": "ConfidentialInstanceConfig", +"OutputQuantileMetrics": { +"id": "OutputQuantileMetrics", "properties": { -"enableConfidentialCompute": { -"description": "Optional. Defines whether the instance should have confidential compute enabled.", -"type": "boolean" +"bytesWritten": { +"$ref": "Quantiles" +}, +"recordsWritten": { +"$ref": "Quantiles" } }, "type": "object" }, -"DataprocMetricConfig": { -"description": "Dataproc metric config.", -"id": "DataprocMetricConfig", +"ParameterValidation": { +"description": "Configuration for parameter validation.", +"id": "ParameterValidation", "properties": { -"metrics": { -"description": "Required. Metrics sources to enable.", -"items": { -"$ref": "Metric" +"regex": { +"$ref": "RegexValidation", +"description": "Validation based on regular expressions." }, -"type": "array" +"values": { +"$ref": "ValueValidation", +"description": "Validation based on a list of allowed values." 
} }, "type": "object" }, -"DiagnoseClusterRequest": { -"description": "A request to collect cluster diagnostic information.", -"id": "DiagnoseClusterRequest", +"PeripheralsConfig": { +"description": "Auxiliary services configuration for a workload.", +"id": "PeripheralsConfig", "properties": { -"diagnosisInterval": { -"$ref": "Interval", -"description": "Optional. Time interval in which diagnosis should be carried out on the cluster." -}, -"job": { -"deprecated": true, -"description": "Optional. DEPRECATED Specifies the job on which diagnosis is to be performed. Format: projects/{project}/regions/{region}/jobs/{job}", +"metastoreService": { +"description": "Optional. Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[region]/services/[service_id]", "type": "string" }, -"jobs": { -"description": "Optional. Specifies a list of jobs on which diagnosis is to be performed. Format: projects/{project}/regions/{region}/jobs/{job}", +"sparkHistoryServerConfig": { +"$ref": "SparkHistoryServerConfig", +"description": "Optional. The Spark History Server configuration for the workload." +} +}, +"type": "object" +}, +"PigJob": { +"description": "A Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN.", +"id": "PigJob", +"properties": { +"continueOnFailure": { +"description": "Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.", +"type": "boolean" +}, +"jarFileUris": { +"description": "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.", "items": { "type": "string" }, "type": "array" }, -"tarballAccess": { -"description": "Optional. (Optional) The access type to the diagnostic tarball. 
If not specified, falls back to default access of the bucket", -"enum": [ -"TARBALL_ACCESS_UNSPECIFIED", -"GOOGLE_CLOUD_SUPPORT", -"GOOGLE_DATAPROC_DIAGNOSE" -], -"enumDescriptions": [ -"Tarball Access unspecified. Falls back to default access of the bucket", -"Google Cloud Support group has read access to the diagnostic tarball", -"Google Cloud Dataproc Diagnose service account has read access to the diagnostic tarball" -], -"type": "string" +"loggingConfig": { +"$ref": "LoggingConfig", +"description": "Optional. The runtime log config for job execution." }, -"tarballGcsDir": { -"description": "Optional. (Optional) The output Cloud Storage directory for the diagnostic tarball. If not specified, a task-specific directory in the cluster's staging bucket will be used.", +"properties": { +"additionalProperties": { "type": "string" }, -"yarnApplicationId": { -"deprecated": true, -"description": "Optional. DEPRECATED Specifies the yarn application on which diagnosis is to be performed.", -"type": "string" +"description": "Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.", +"type": "object" }, -"yarnApplicationIds": { -"description": "Optional. Specifies a list of yarn applications on which diagnosis is to be performed.", -"items": { +"queryFileUri": { +"description": "The HCFS URI of the script that contains the Pig queries.", "type": "string" }, -"type": "array" -} -}, -"type": "object" +"queryList": { +"$ref": "QueryList", +"description": "A list of queries." }, -"DiagnoseClusterResults": { -"description": "The location of diagnostic output.", -"id": "DiagnoseClusterResults", -"properties": { -"outputUri": { -"description": "Output only. The Cloud Storage URI of the diagnostic output. 
The output report is a plain text file with a summary of collected diagnostics.", -"readOnly": true, +"scriptVariables": { +"additionalProperties": { "type": "string" +}, +"description": "Optional. Mapping of query variable names to values (equivalent to the Pig command: name=[value]).", +"type": "object" } }, "type": "object" }, -"DiskConfig": { -"description": "Specifies the config of disk options for a group of VM instances.", -"id": "DiskConfig", +"Policy": { +"description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources.A Policy is a collection of bindings. A binding binds one or more members, or principals, to a single role. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A role is a named list of permissions; each role can be an IAM predefined role or a user-created custom role.For some types of Google Cloud resources, a binding can also specify a condition, which is a logical expression that allows access to a resource only if the expression evaluates to true. A condition can add constraints based on attributes of the request, the resource, or both. 
To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).JSON example: { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time < timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } YAML example: bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 For a description of IAM and its features, see the IAM documentation (https://cloud.google.com/iam/docs/).", +"id": "Policy", "properties": { -"bootDiskProvisionedIops": { -"description": "Optional. Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. Note: This field is only supported if boot_disk_type is hyperdisk-balanced.", -"format": "int64", -"type": "string" +"bindings": { +"description": "Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. 
Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.", +"items": { +"$ref": "Binding" }, -"bootDiskProvisionedThroughput": { -"description": "Optional. Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be greater than or equal to 1. Note: This field is only supported if boot_disk_type is hyperdisk-balanced.", -"format": "int64", +"type": "array" +}, +"etag": { +"description": "etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.", +"format": "byte", "type": "string" }, -"bootDiskSizeGb": { -"description": "Optional. Size in GB of the boot disk (default is 500GB).", +"version": { +"description": "Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. 
This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", "format": "int32", "type": "integer" +} }, -"bootDiskType": { -"description": "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).", +"type": "object" +}, +"PoolData": { +"description": "Pool Data", +"id": "PoolData", +"properties": { +"name": { "type": "string" }, -"localSsdInterface": { -"description": "Optional. Interface type of local SSDs (default is \"scsi\"). Valid values: \"scsi\" (Small Computer System Interface), \"nvme\" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).", +"stageIds": { +"items": { +"format": "int64", "type": "string" }, -"numLocalSsds": { -"description": "Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. 
If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.Note: Local SSD options may vary by machine type and number of vCPUs selected.", -"format": "int32", -"type": "integer" +"type": "array" } }, "type": "object" }, -"DriverSchedulingConfig": { -"description": "Driver scheduling configuration.", -"id": "DriverSchedulingConfig", +"PrestoJob": { +"description": "A Dataproc job for running Presto (https://prestosql.io/) queries. IMPORTANT: The Dataproc Presto Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/presto) must be enabled when the cluster is created to submit a Presto job to the cluster.", +"id": "PrestoJob", "properties": { -"memoryMb": { -"description": "Required. The amount of memory in MB the driver is requesting.", -"format": "int32", -"type": "integer" +"clientTags": { +"description": "Optional. Presto client tags to attach to this query", +"items": { +"type": "string" }, -"vcores": { -"description": "Required. The number of vCPUs the driver is requesting.", -"format": "int32", -"type": "integer" -} +"type": "array" }, -"type": "object" +"continueOnFailure": { +"description": "Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.", +"type": "boolean" }, -"Empty": { -"description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } ", -"id": "Empty", -"properties": {}, -"type": "object" +"loggingConfig": { +"$ref": "LoggingConfig", +"description": "Optional. The runtime log config for job execution." +}, +"outputFormat": { +"description": "Optional. 
The format in which query output will be displayed. See the Presto documentation for supported output formats", +"type": "string" }, -"EncryptionConfig": { -"description": "Encryption settings for the cluster.", -"id": "EncryptionConfig", "properties": { -"gcePdKmsKeyName": { -"description": "Optional. The Cloud KMS key resource name to use for persistent disk encryption for all instances in the cluster. See Use CMEK with cluster data (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) for more information.", +"additionalProperties": { "type": "string" }, -"kmsKey": { -"description": "Optional. The Cloud KMS key resource name to use for cluster persistent disk and job argument encryption. See Use CMEK with cluster data (https://cloud.google.com//dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data) for more information.When this key resource name is provided, the following job arguments of the following job types submitted to the cluster are encrypted using CMEK: FlinkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) HadoopJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) SparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) SparkRJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) PySparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) SparkSqlJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) scriptVariables and queryList.queries HiveJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) scriptVariables and queryList.queries PigJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) scriptVariables and queryList.queries PrestoJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) scriptVariables and queryList.queries", +"description": 
"Optional. A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI", +"type": "object" +}, +"queryFileUri": { +"description": "The HCFS URI of the script that contains SQL queries.", "type": "string" +}, +"queryList": { +"$ref": "QueryList", +"description": "A list of queries." } }, "type": "object" }, -"EndpointConfig": { -"description": "Endpoint config for this cluster", -"id": "EndpointConfig", +"ProcessSummary": { +"description": "Process Summary", +"id": "ProcessSummary", "properties": { -"enableHttpPortAccess": { -"description": "Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.", +"addTime": { +"format": "google-datetime", +"type": "string" +}, +"hostPort": { +"type": "string" +}, +"isActive": { "type": "boolean" }, -"httpPorts": { +"processId": { +"type": "string" +}, +"processLogs": { "additionalProperties": { "type": "string" }, -"description": "Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.", -"readOnly": true, -"type": "object" +"type": "object" +}, +"removeTime": { +"format": "google-datetime", +"type": "string" +}, +"totalCores": { +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"ProvisioningModelMix": { +"description": "Defines how Dataproc should create VMs with a mixture of provisioning models.", +"id": "ProvisioningModelMix", +"properties": { +"standardCapacityBase": { +"description": "Optional. The base capacity that will always use Standard VMs to avoid risk of more preemption than the minimum capacity you need. Dataproc will create only standard VMs until it reaches standard_capacity_base, then it will start using standard_capacity_percent_above_base to mix Spot with Standard VMs. eg. 
If 15 instances are requested and standard_capacity_base is 5, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances.", +"format": "int32", +"type": "integer" +}, +"standardCapacityPercentAboveBase": { +"description": "Optional. The percentage of target capacity that should use Standard VM. The remaining percentage will use Spot VMs. The percentage applies only to the capacity above standard_capacity_base. eg. If 15 instances are requested and standard_capacity_base is 5 and standard_capacity_percent_above_base is 30, Dataproc will create 5 standard VMs and then start mixing spot and standard VMs for remaining 10 instances. The mix will be 30% standard and 70% spot.", +"format": "int32", +"type": "integer" } }, "type": "object" }, -"EnvironmentConfig": { -"description": "Environment configuration for a workload.", -"id": "EnvironmentConfig", +"PyPiRepositoryConfig": { +"description": "Configuration for PyPi repository", +"id": "PyPiRepositoryConfig", "properties": { -"executionConfig": { -"$ref": "ExecutionConfig", -"description": "Optional. Execution configuration for a workload." -}, -"peripheralsConfig": { -"$ref": "PeripheralsConfig", -"description": "Optional. Peripherals configuration that workload has access to." +"pypiRepository": { +"description": "Optional. PyPi repository address", +"type": "string" } }, "type": "object" }, -"ExecutionConfig": { -"description": "Execution configuration for a workload.", -"id": "ExecutionConfig", +"PySparkBatch": { +"description": "A configuration for running an Apache PySpark (https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html) batch workload.", +"id": "PySparkBatch", "properties": { -"idleTtl": { -"description": "Optional. Applies to sessions only. The duration to keep the session alive while it's idling. Exceeding this threshold causes the session to terminate. This field cannot be set on a batch workload. 
Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Defaults to 1 hour if not set. If both ttl and idle_ttl are specified for an interactive session, the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.", -"format": "google-duration", +"archiveUris": { +"description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", +"items": { "type": "string" }, -"kmsKey": { -"description": "Optional. The Cloud KMS key to use for encryption.", -"type": "string" +"type": "array" }, -"networkTags": { -"description": "Optional. Tags used for network traffic control.", +"args": { +"description": "Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.", "items": { "type": "string" }, "type": "array" }, -"networkUri": { -"description": "Optional. Network URI to connect workload to.", +"fileUris": { +"description": "Optional. HCFS URIs of files to be placed in the working directory of each executor.", +"items": { "type": "string" }, -"serviceAccount": { -"description": "Optional. Service account that used to execute workload.", -"type": "string" +"type": "array" }, -"stagingBucket": { -"description": "Optional. A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. 
This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.", +"jarFileUris": { +"description": "Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.", +"items": { "type": "string" }, -"subnetworkUri": { -"description": "Optional. Subnetwork URI to connect workload to.", +"type": "array" +}, +"mainPythonFileUri": { +"description": "Required. The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.", "type": "string" }, -"ttl": { -"description": "Optional. The duration after which the workload will be terminated, specified as the JSON representation for Duration (https://protobuf.dev/programming-guides/proto3/#json). When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or run forever without exiting). If ttl is not specified for an interactive session, it defaults to 24 hours. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4 hours. Minimum value is 10 minutes; maximum value is 14 days. If both ttl and idle_ttl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.", -"format": "google-duration", +"pythonFileUris": { +"description": "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", +"items": { "type": "string" +}, +"type": "array" } }, "type": "object" }, -"Expr": { -"description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. 
The syntax and semantics of CEL are documented at https://github.com/google/cel-spec.Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() < 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' && document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", -"id": "Expr", +"PySparkJob": { +"description": "A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.", +"id": "PySparkJob", "properties": { -"description": { -"description": "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", -"type": "string" -}, -"expression": { -"description": "Textual representation of an expression in Common Expression Language syntax.", +"archiveUris": { +"description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", +"items": { "type": "string" }, -"location": { -"description": "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", -"type": "string" +"type": "array" }, -"title": { -"description": "Optional. 
Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", +"args": { +"description": "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", +"items": { "type": "string" -} }, -"type": "object" +"type": "array" }, -"FlinkJob": { -"description": "A Dataproc job for running Apache Flink applications on YARN.", -"id": "FlinkJob", -"properties": { -"args": { -"description": "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision might occur that causes an incorrect job submission.", +"fileUris": { +"description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", "items": { "type": "string" }, "type": "array" }, "jarFileUris": { -"description": "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Flink driver and tasks.", +"description": "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", "items": { "type": "string" }, @@ -4169,437 +9067,618 @@ "$ref": "LoggingConfig", "description": "Optional. The runtime log config for job execution." }, -"mainClass": { -"description": "The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jarFileUris.", -"type": "string" -}, -"mainJarFileUri": { -"description": "The HCFS URI of the jar file that contains the main class.", +"mainPythonFileUri": { +"description": "Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.", "type": "string" }, "properties": { "additionalProperties": { "type": "string" }, -"description": "Optional. A mapping of property names to values, used to configure Flink. 
Properties that conflict with values set by the Dataproc API might beoverwritten. Can include properties set in/etc/flink/conf/flink-defaults.conf and classes in user code.", +"description": "Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", "type": "object" }, -"savepointUri": { -"description": "Optional. HCFS URI of the savepoint, which contains the last saved progress for starting the current job.", +"pythonFileUris": { +"description": "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", +"items": { "type": "string" +}, +"type": "array" } }, "type": "object" }, -"GceClusterConfig": { -"description": "Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster.", -"id": "GceClusterConfig", +"Quantiles": { +"description": "Quantile metrics data related to Tasks. Units can be seconds, bytes, milliseconds, etc depending on the message type.", +"id": "Quantiles", "properties": { -"confidentialInstanceConfig": { -"$ref": "ConfidentialInstanceConfig", -"description": "Optional. Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)." +"count": { +"format": "int64", +"type": "string" }, -"internalIpOnly": { -"description": "Optional. This setting applies to subnetwork-enabled networks. It is set to true by default in clusters created with image versions 2.2.x.When set to true: All cluster VMs have internal IP addresses. Google Private Access (https://cloud.google.com/vpc/docs/private-google-access) must be enabled to access Dataproc and other Google Cloud APIs. 
Off-cluster dependencies must be configured to be accessible without external IP addresses.When set to false: Cluster VMs are not restricted to internal IP addresses. Ephemeral external IP addresses are assigned to each cluster VM.", -"type": "boolean" +"maximum": { +"format": "int64", +"type": "string" }, -"metadata": { -"additionalProperties": { +"minimum": { +"format": "int64", "type": "string" }, -"description": "Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", +"percentile25": { +"format": "int64", +"type": "string" +}, +"percentile50": { +"format": "int64", +"type": "string" +}, +"percentile75": { +"format": "int64", +"type": "string" +}, +"sum": { +"format": "int64", +"type": "string" +} +}, "type": "object" }, -"networkUri": { -"description": "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information).A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default projects/[project_id]/global/networks/default default", +"QueryList": { +"description": "A list of queries to run on a cluster.", +"id": "QueryList", +"properties": { +"queries": { +"description": "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } } ", +"items": { "type": "string" }, -"nodeGroupAffinity": { -"$ref": "NodeGroupAffinity", -"description": "Optional. Node Group Affinity for sole-tenant clusters." +"type": "array" +} }, -"privateIpv6GoogleAccess": { -"description": "Optional. The type of IPv6 access for a cluster.", -"enum": [ -"PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED", -"INHERIT_FROM_SUBNETWORK", -"OUTBOUND", -"BIDIRECTIONAL" -], -"enumDescriptions": [ -"If unspecified, Compute Engine default behavior will apply, which is the same as INHERIT_FROM_SUBNETWORK.", -"Private access to and from Google Services configuration inherited from the subnetwork configuration. This is the default Compute Engine behavior.", -"Enables outbound private IPv6 access to Google Services from the Dataproc cluster.", -"Enables bidirectional private IPv6 access between Google Services and the Dataproc cluster." -], +"type": "object" +}, +"RddDataDistribution": { +"description": "Details about RDD usage.", +"id": "RddDataDistribution", +"properties": { +"address": { "type": "string" }, -"reservationAffinity": { -"$ref": "ReservationAffinity", -"description": "Optional. Reservation Affinity for consuming Zonal reservation." +"diskUsed": { +"format": "int64", +"type": "string" }, -"serviceAccount": { -"description": "Optional. 
The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services.If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", +"memoryRemaining": { +"format": "int64", "type": "string" }, -"serviceAccountScopes": { -"description": "Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: https://www.googleapis.com/auth/cloud.useraccounts.readonly https://www.googleapis.com/auth/devstorage.read_write https://www.googleapis.com/auth/logging.writeIf no scopes are specified, the following defaults are also provided: https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/bigtable.admin.table https://www.googleapis.com/auth/bigtable.data https://www.googleapis.com/auth/devstorage.full_control", -"items": { +"memoryUsed": { +"format": "int64", "type": "string" }, -"type": "array" +"offHeapMemoryRemaining": { +"format": "int64", +"type": "string" }, -"shieldedInstanceConfig": { -"$ref": "ShieldedInstanceConfig", -"description": "Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm)." +"offHeapMemoryUsed": { +"format": "int64", +"type": "string" }, -"subnetworkUri": { -"description": "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri.A full URL, partial URI, or short name are valid. 
Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0 projects/[project_id]/regions/[region]/subnetworks/sub0 sub0", +"onHeapMemoryRemaining": { +"format": "int64", "type": "string" }, -"tags": { -"description": "The Compute Engine network tags to add to all instances (see Tagging instances (https://cloud.google.com/vpc/docs/add-remove-network-tags)).", -"items": { +"onHeapMemoryUsed": { +"format": "int64", "type": "string" +} +}, +"type": "object" +}, +"RddOperationCluster": { +"description": "A grouping of nodes representing higher level constructs (stage, job etc.).", +"id": "RddOperationCluster", +"properties": { +"childClusters": { +"items": { +"$ref": "RddOperationCluster" }, "type": "array" }, -"zoneUri": { -"description": "Optional. The Compute Engine zone where the Dataproc cluster will be located. If omitted, the service will pick a zone in the cluster's Compute Engine region. On a get request, zone will always be present.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] projects/[project_id]/zones/[zone] [zone]", +"childNodes": { +"items": { +"$ref": "RddOperationNode" +}, +"type": "array" +}, +"name": { +"type": "string" +}, +"rddClusterId": { "type": "string" } }, "type": "object" }, -"GetIamPolicyRequest": { -"description": "Request message for GetIamPolicy method.", -"id": "GetIamPolicyRequest", +"RddOperationEdge": { +"description": "A directed edge representing dependency between two RDDs.", +"id": "RddOperationEdge", "properties": { -"options": { -"$ref": "GetPolicyOptions", -"description": "OPTIONAL: A GetPolicyOptions object for specifying options to GetIamPolicy." +"fromId": { +"format": "int32", +"type": "integer" +}, +"toId": { +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"RddOperationGraph": { +"description": "Graph representing RDD dependencies. 
Consists of edges and a root cluster.", +"id": "RddOperationGraph", +"properties": { +"edges": { +"items": { +"$ref": "RddOperationEdge" +}, +"type": "array" +}, +"incomingEdges": { +"items": { +"$ref": "RddOperationEdge" +}, +"type": "array" +}, +"outgoingEdges": { +"items": { +"$ref": "RddOperationEdge" +}, +"type": "array" +}, +"rootCluster": { +"$ref": "RddOperationCluster" +}, +"stageId": { +"format": "int64", +"type": "string" } }, "type": "object" }, -"GetPolicyOptions": { -"description": "Encapsulates settings provided to GetIamPolicy.", -"id": "GetPolicyOptions", +"RddOperationNode": { +"description": "A node in the RDD operation graph. Corresponds to a single RDD.", +"id": "RddOperationNode", "properties": { -"requestedPolicyVersion": { -"description": "Optional. The maximum policy version that will be used to format the policy.Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected.Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset.The policy in the response might use the policy version that you specified, or it might use a lower policy version. 
For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", +"barrier": { +"type": "boolean" +}, +"cached": { +"type": "boolean" +}, +"callsite": { +"type": "string" +}, +"name": { +"type": "string" +}, +"nodeId": { "format": "int32", "type": "integer" +}, +"outputDeterministicLevel": { +"enum": [ +"DETERMINISTIC_LEVEL_UNSPECIFIED", +"DETERMINISTIC_LEVEL_DETERMINATE", +"DETERMINISTIC_LEVEL_UNORDERED", +"DETERMINISTIC_LEVEL_INDETERMINATE" +], +"enumDescriptions": [ +"", +"", +"", +"" +], +"type": "string" } }, "type": "object" }, -"GkeClusterConfig": { -"description": "The cluster's GKE config.", -"id": "GkeClusterConfig", +"RddPartitionInfo": { +"description": "Information about RDD partitions.", +"id": "RddPartitionInfo", "properties": { -"gkeClusterTarget": { -"description": "Optional. A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'", +"blockName": { "type": "string" }, -"namespacedGkeDeploymentTarget": { -"$ref": "NamespacedGkeDeploymentTarget", -"deprecated": true, -"description": "Optional. Deprecated. Use gkeClusterTarget. Used only for the deprecated beta. A target for the deployment." +"diskUsed": { +"format": "int64", +"type": "string" }, -"nodePoolTarget": { -"description": "Optional. GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. 
All node pools must have the same location settings.", +"executors": { "items": { -"$ref": "GkeNodePoolTarget" +"type": "string" }, "type": "array" +}, +"memoryUsed": { +"format": "int64", +"type": "string" +}, +"storageLevel": { +"type": "string" } }, "type": "object" }, -"GkeNodeConfig": { -"description": "Parameters that describe cluster nodes.", -"id": "GkeNodeConfig", +"RddStorageInfo": { +"description": "Overall data about RDD storage.", +"id": "RddStorageInfo", "properties": { -"accelerators": { -"description": "Optional. A list of hardware accelerators (https://cloud.google.com/compute/docs/gpus) to attach to each node.", +"dataDistribution": { "items": { -"$ref": "GkeNodePoolAcceleratorConfig" +"$ref": "RddDataDistribution" }, "type": "array" }, -"bootDiskKmsKey": { -"description": "Optional. The Customer Managed Encryption Key (CMEK) (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. Specify the key using the following format: projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}", +"diskUsed": { +"format": "int64", "type": "string" }, -"localSsdCount": { -"description": "Optional. The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone (see Adding Local SSDs (https://cloud.google.com/compute/docs/disks/local-ssd)).", -"format": "int32", -"type": "integer" -}, -"machineType": { -"description": "Optional. The name of a Compute Engine machine type (https://cloud.google.com/compute/docs/machine-types).", +"memoryUsed": { +"format": "int64", "type": "string" }, -"minCpuPlatform": { -"description": "Optional. Minimum CPU platform (https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. 
Specify the friendly names of CPU platforms, such as \"Intel Haswell\"` or Intel Sandy Bridge\".", +"name": { "type": "string" }, -"preemptible": { -"description": "Optional. Whether the nodes are created as legacy preemptible VM instances (https://cloud.google.com/compute/docs/instances/preemptible). Also see Spot VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).", -"type": "boolean" +"numCachedPartitions": { +"format": "int32", +"type": "integer" }, -"spot": { -"description": "Optional. Whether the nodes are created as Spot VM instances (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the latest update to legacy preemptible VMs. Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).", -"type": "boolean" -} +"numPartitions": { +"format": "int32", +"type": "integer" }, -"type": "object" +"partitions": { +"items": { +"$ref": "RddPartitionInfo" }, -"GkeNodePoolAcceleratorConfig": { -"description": "A GkeNodeConfigAcceleratorConfig represents a Hardware Accelerator request for a node pool.", -"id": "GkeNodePoolAcceleratorConfig", -"properties": { -"acceleratorCount": { -"description": "The number of accelerator cards exposed to an instance.", -"format": "int64", -"type": "string" +"type": "array" }, -"acceleratorType": { -"description": "The accelerator type resource namename (see GPUs on Compute Engine).", -"type": "string" +"rddStorageId": { +"format": "int32", +"type": "integer" }, -"gpuPartitionSize": { -"description": "Size of partitions to create on the GPU. 
Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).", +"storageLevel": { "type": "string" } }, "type": "object" }, -"GkeNodePoolAutoscalingConfig": { -"description": "GkeNodePoolAutoscaling contains information the cluster autoscaler needs to adjust the size of the node pool to the current cluster usage.", -"id": "GkeNodePoolAutoscalingConfig", +"RegexValidation": { +"description": "Validation based on regular expressions.", +"id": "RegexValidation", "properties": { -"maxNodeCount": { -"description": "The maximum number of nodes in the node pool. Must be >= min_node_count, and must be > 0. Note: Quota must be sufficient to scale up the cluster.", -"format": "int32", -"type": "integer" +"regexes": { +"description": "Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).", +"items": { +"type": "string" }, -"minNodeCount": { -"description": "The minimum number of nodes in the node pool. Must be >= 0 and <= max_node_count.", -"format": "int32", -"type": "integer" +"type": "array" } }, "type": "object" }, -"GkeNodePoolConfig": { -"description": "The configuration of a GKE node pool used by a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster).", -"id": "GkeNodePoolConfig", +"RepairClusterRequest": { +"description": "A request to repair a cluster.", +"id": "RepairClusterRequest", "properties": { -"autoscaling": { -"$ref": "GkeNodePoolAutoscalingConfig", -"description": "Optional. The autoscaler configuration for this node pool. The autoscaler is enabled only when a valid configuration is present." +"cluster": { +"$ref": "ClusterToRepair", +"description": "Optional. Cluster to be repaired" }, -"config": { -"$ref": "GkeNodeConfig", -"description": "Optional. The node pool configuration." 
+"clusterUuid": { +"description": "Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist.", +"type": "string" }, -"locations": { -"description": "Optional. The list of Compute Engine zones (https://cloud.google.com/compute/docs/zones#available) where node pool nodes associated with a Dataproc on GKE virtual cluster will be located.Note: All node pools associated with a virtual cluster must be located in the same region as the virtual cluster, and they must be located in the same zone within that region.If a location is not specified during node pool creation, Dataproc on GKE will choose the zone.", -"items": { +"gracefulDecommissionTimeout": { +"description": "Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning facilitates the removal of cluster nodes without interrupting jobs in progress. The timeout specifies the amount of time to wait for jobs finish before forcefully removing nodes. The default timeout is 0 for forceful decommissioning, and the maximum timeout period is 1 day. (see JSON Mapping\u2014Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).graceful_decommission_timeout is supported in Dataproc image versions 1.2+.", +"format": "google-duration", "type": "string" }, +"nodePools": { +"description": "Optional. Node pools and corresponding repair action to be taken. All node pools should be unique in this request. i.e. Multiple entries for the same node pool id are not allowed.", +"items": { +"$ref": "NodePool" +}, "type": "array" +}, +"parentOperationId": { +"description": "Optional. operation id of the parent operation sending the repair request", +"type": "string" +}, +"requestId": { +"description": "Optional. A unique ID used to identify the request. 
If the server receives two RepairClusterRequests with the same ID, the second request is ignored, and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"type": "string" } }, "type": "object" }, -"GkeNodePoolTarget": { -"description": "GKE node pools that Dataproc workloads run on.", -"id": "GkeNodePoolTarget", +"RepairNodeGroupRequest": { +"id": "RepairNodeGroupRequest", "properties": { -"nodePool": { -"description": "Required. The target GKE node pool. Format: 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'", +"instanceNames": { +"description": "Required. Name of instances to be repaired. These instances must belong to specified node pool.", +"items": { "type": "string" }, -"nodePoolConfig": { -"$ref": "GkeNodePoolConfig", -"description": "Input only. The configuration for the GKE node pool.If specified, Dataproc attempts to create a node pool with the specified shape. If one with the same name already exists, it is verified against all specified fields. If a field differs, the virtual cluster creation will fail.If omitted, any node pool with the specified name is used. If a node pool with the specified name does not exist, Dataproc create a node pool with default values.This is an input only field. It will not be returned by the API." +"type": "array" }, -"roles": { -"description": "Required. The roles associated with the GKE node pool.", -"items": { +"repairAction": { +"description": "Required. 
Repair action to take on specified resources of the node pool.", "enum": [ -"ROLE_UNSPECIFIED", -"DEFAULT", -"CONTROLLER", -"SPARK_DRIVER", -"SPARK_EXECUTOR" +"REPAIR_ACTION_UNSPECIFIED", +"REPLACE" ], "enumDescriptions": [ -"Role is unspecified.", -"At least one node pool must have the DEFAULT role. Work assigned to a role that is not associated with a node pool is assigned to the node pool with the DEFAULT role. For example, work assigned to the CONTROLLER role will be assigned to the node pool with the DEFAULT role if no node pool has the CONTROLLER role.", -"Run work associated with the Dataproc control plane (for example, controllers and webhooks). Very low resource requirements.", -"Run work associated with a Spark driver of a job.", -"Run work associated with a Spark executor of a job." +"No action will be taken by default.", +"replace the specified list of nodes." ], "type": "string" }, -"type": "array" +"requestId": { +"description": "Optional. A unique ID used to identify the request. If the server receives two RepairNodeGroupRequest with the same ID, the second request is ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"type": "string" } }, "type": "object" }, -"GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig": { -"description": "Encryption settings for encrypting workflow template job arguments.", -"id": "GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig", +"RepositoryConfig": { +"description": "Configuration for dependency repositories", +"id": "RepositoryConfig", "properties": { -"kmsKey": { -"description": "Optional. 
The Cloud KMS key name to use for encrypting workflow template job arguments.When this this key is provided, the following workflow template job arguments (https://cloud.google.com/dataproc/docs/concepts/workflows/use-workflows#adding_jobs_to_a_template), if present, are CMEK encrypted (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_workflow_template_data): FlinkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob) HadoopJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob) SparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob) SparkRJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob) PySparkJob args (https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob) SparkSqlJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob) scriptVariables and queryList.queries HiveJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) scriptVariables and queryList.queries PigJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob) scriptVariables and queryList.queries PrestoJob (https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob) scriptVariables and queryList.queries", -"type": "string" +"pypiRepositoryConfig": { +"$ref": "PyPiRepositoryConfig", +"description": "Optional. Configuration for PyPi repository." } }, "type": "object" }, -"HadoopJob": { -"description": "A Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).", -"id": "HadoopJob", +"ReservationAffinity": { +"description": "Reservation Affinity for consuming Zonal reservation.", +"id": "ReservationAffinity", "properties": { -"archiveUris": { -"description": "Optional. 
HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.", -"items": { +"consumeReservationType": { +"description": "Optional. Type of reservation to consume", +"enum": [ +"TYPE_UNSPECIFIED", +"NO_RESERVATION", +"ANY_RESERVATION", +"SPECIFIC_RESERVATION" +], +"enumDescriptions": [ +"", +"Do not consume from any allocated capacity.", +"Consume any reservation available.", +"Must consume from a specific reservation. Must specify key value fields for specifying the reservations." +], "type": "string" }, -"type": "array" +"key": { +"description": "Optional. Corresponds to the label key of reservation resource.", +"type": "string" }, -"args": { -"description": "Optional. The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision might occur that causes an incorrect job submission.", +"values": { +"description": "Optional. Corresponds to the label values of reservation resource.", "items": { "type": "string" }, "type": "array" +} }, -"fileUris": { -"description": "Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.", -"items": { +"type": "object" +}, +"ResizeNodeGroupRequest": { +"description": "A request to resize a node group.", +"id": "ResizeNodeGroupRequest", +"properties": { +"gracefulDecommissionTimeout": { +"description": "Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters#graceful_decommissioning) allows the removal of nodes from the Compute Engine node group without interrupting jobs in progress. This timeout specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). 
Default timeout is 0 (for forceful decommission), and the maximum allowed timeout is 1 day. (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Only supported on Dataproc image versions 1.2 and higher.", +"format": "google-duration", "type": "string" }, -"type": "array" +"parentOperationId": { +"description": "Optional. operation id of the parent operation sending the resize request", +"type": "string" }, -"jarFileUris": { -"description": "Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.", +"requestId": { +"description": "Optional. A unique ID used to identify the request. If the server receives two ResizeNodeGroupRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.ResizeNodeGroupRequests) with the same ID, the second request is ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"type": "string" +}, +"size": { +"description": "Required. The number of running instances for the node group to maintain. The group adds or removes instances to maintain the number of instances specified by this parameter.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"ResourceInformation": { +"id": "ResourceInformation", +"properties": { +"addresses": { "items": { "type": "string" }, "type": "array" }, -"loggingConfig": { -"$ref": "LoggingConfig", -"description": "Optional. The runtime log config for job execution." -}, -"mainClass": { -"description": "The name of the driver's main class. 
The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.", +"name": { "type": "string" +} }, -"mainJarFileUri": { -"description": "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'", -"type": "string" +"type": "object" }, +"ResourceProfileInfo": { +"description": "Resource profile that contains information about all the resources required by executors and tasks.", +"id": "ResourceProfileInfo", "properties": { +"executorResources": { "additionalProperties": { -"type": "string" +"$ref": "ExecutorResourceRequest" +}, +"type": "object" +}, +"resourceProfileId": { +"format": "int32", +"type": "integer" +}, +"taskResources": { +"additionalProperties": { +"$ref": "TaskResourceRequest" }, -"description": "Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.", "type": "object" } }, "type": "object" }, -"HiveJob": { -"description": "A Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN.", -"id": "HiveJob", +"RuntimeConfig": { +"description": "Runtime configuration for a workload.", +"id": "RuntimeConfig", "properties": { -"continueOnFailure": { -"description": "Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.", -"type": "boolean" +"autotuningConfig": { +"$ref": "AutotuningConfig", +"description": "Optional. Autotuning configuration of the workload." }, -"jarFileUris": { -"description": "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. 
Can contain Hive SerDes and UDFs.", -"items": { +"cohort": { +"description": "Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs.", "type": "string" }, -"type": "array" +"containerImage": { +"description": "Optional. Optional custom container image for the job runtime environment. If not specified, a default container image will be used.", +"type": "string" }, "properties": { "additionalProperties": { "type": "string" }, -"description": "Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.", +"description": "Optional. A mapping of property names to values, which are used to configure workload execution.", "type": "object" }, -"queryFileUri": { -"description": "The HCFS URI of the script that contains Hive queries.", +"repositoryConfig": { +"$ref": "RepositoryConfig", +"description": "Optional. Dependency repository configuration." +}, +"version": { +"description": "Optional. Version of the batch runtime.", "type": "string" +} }, -"queryList": { -"$ref": "QueryList", -"description": "A list of queries." +"type": "object" }, -"scriptVariables": { +"RuntimeInfo": { +"description": "Runtime information about workload execution.", +"id": "RuntimeInfo", +"properties": { +"approximateUsage": { +"$ref": "UsageMetrics", +"description": "Output only. 
Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments).", +"readOnly": true +}, +"currentUsage": { +"$ref": "UsageSnapshot", +"description": "Output only. Snapshot of current workload resource usage.", +"readOnly": true +}, +"diagnosticOutputUri": { +"description": "Output only. A URI pointing to the location of the diagnostics tarball.", +"readOnly": true, +"type": "string" +}, +"endpoints": { "additionalProperties": { "type": "string" }, -"description": "Optional. Mapping of query variable names to values (equivalent to the Hive command: SET name=\"value\";).", +"description": "Output only. Map of remote access endpoints (such as web interfaces and APIs) to their URIs.", +"readOnly": true, "type": "object" +}, +"outputUri": { +"description": "Output only. A URI pointing to the location of the stdout and stderr of the workload.", +"readOnly": true, +"type": "string" } }, "type": "object" }, -"IdentityConfig": { -"description": "Identity related configuration, including service account based secure multi-tenancy user mappings.", -"id": "IdentityConfig", +"SearchSessionSparkApplicationExecutorStageSummaryResponse": { +"description": "List of Executors associated with a Spark Application Stage.", +"id": "SearchSessionSparkApplicationExecutorStageSummaryResponse", "properties": { -"userServiceAccountMapping": { -"additionalProperties": { +"nextPageToken": { +"description": "This token is included in the response if there are more results to fetch. 
To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationExecutorStageSummaryRequest.", "type": "string" }, -"description": "Required. Map of user to service account.", -"type": "object" +"sparkApplicationStageExecutors": { +"description": "Details about executors used by the application stage.", +"items": { +"$ref": "ExecutorStageSummary" +}, +"type": "array" } }, "type": "object" }, -"InjectCredentialsRequest": { -"description": "A request to inject credentials into a cluster.", -"id": "InjectCredentialsRequest", +"SearchSessionSparkApplicationExecutorsResponse": { +"description": "List of Executors associated with a Spark Application.", +"id": "SearchSessionSparkApplicationExecutorsResponse", "properties": { -"clusterUuid": { -"description": "Required. The cluster UUID.", +"nextPageToken": { +"description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationExecutorsRequest.", "type": "string" }, -"credentialsCiphertext": { -"description": "Required. 
The encrypted credentials being injected in to the cluster.The client is responsible for encrypting the credentials in a way that is supported by the cluster.A wrapped value is used here so that the actual contents of the encrypted credentials are not written to audit logs.", -"type": "string" +"sparkApplicationExecutors": { +"description": "Details about executors used by the application.", +"items": { +"$ref": "ExecutorSummary" +}, +"type": "array" } }, "type": "object" }, -"InstanceFlexibilityPolicy": { -"description": "Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.", -"id": "InstanceFlexibilityPolicy", +"SearchSessionSparkApplicationJobsResponse": { +"description": "A list of Jobs associated with a Spark Application.", +"id": "SearchSessionSparkApplicationJobsResponse", "properties": { -"instanceSelectionList": { -"description": "Optional. List of instance selection options that the group will use when creating new VMs.", +"nextPageToken": { +"description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationJobsRequest.", +"type": "string" +}, +"sparkApplicationJobs": { +"description": "Output only. Data corresponding to a spark job.", "items": { -"$ref": "InstanceSelection" +"$ref": "JobData" }, +"readOnly": true, "type": "array" +} }, -"instanceSelectionResults": { -"description": "Output only. A list of instance selection results in the group.", +"type": "object" +}, +"SearchSessionSparkApplicationSqlQueriesResponse": { +"description": "List of all queries for a Spark Application.", +"id": "SearchSessionSparkApplicationSqlQueriesResponse", +"properties": { +"nextPageToken": { +"description": "This token is included in the response if there are more results to fetch. 
To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationSqlQueriesRequest.", +"type": "string" +}, +"sparkApplicationSqlQueries": { +"description": "Output only. SQL Execution Data", "items": { -"$ref": "InstanceSelectionResult" +"$ref": "SqlExecutionUiData" }, "readOnly": true, "type": "array" @@ -4607,1926 +9686,1965 @@ }, "type": "object" }, -"InstanceGroupAutoscalingPolicyConfig": { -"description": "Configuration for the size bounds of an instance group, including its proportional size to other groups.", -"id": "InstanceGroupAutoscalingPolicyConfig", +"SearchSessionSparkApplicationStageAttemptTasksResponse": { +"description": "List of tasks for a stage of a Spark Application", +"id": "SearchSessionSparkApplicationStageAttemptTasksResponse", "properties": { -"maxInstances": { -"description": "Required. Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set.Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). Default: 0.", -"format": "int32", -"type": "integer" +"nextPageToken": { +"description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationStageAttemptTasksRequest.", +"type": "string" }, -"minInstances": { -"description": "Optional. Minimum number of instances for this group.Primary workers - Bounds: 2, max_instances. Default: 2. Secondary workers - Bounds: 0, max_instances. Default: 0.", -"format": "int32", -"type": "integer" +"sparkApplicationStageAttemptTasks": { +"description": "Output only. Data corresponding to tasks created by spark.", +"items": { +"$ref": "TaskData" }, -"weight": { -"description": "Optional. 
Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker.The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if max_instances for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created.If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers.", -"format": "int32", -"type": "integer" +"readOnly": true, +"type": "array" } }, "type": "object" }, -"InstanceGroupConfig": { -"description": "The config settings for Compute Engine resources in an instance group, such as a master or worker group.", -"id": "InstanceGroupConfig", +"SearchSessionSparkApplicationStageAttemptsResponse": { +"description": "A list of Stage Attempts for a Stage of a Spark Application.", +"id": "SearchSessionSparkApplicationStageAttemptsResponse", "properties": { -"accelerators": { -"description": "Optional. The Compute Engine accelerator configuration for these instances.", +"nextPageToken": { +"description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationStageAttemptsRequest.", +"type": "string" +}, +"sparkApplicationStageAttempts": { +"description": "Output only. 
Data corresponding to a stage attempts", "items": { -"$ref": "AcceleratorConfig" +"$ref": "StageData" }, +"readOnly": true, "type": "array" +} }, -"diskConfig": { -"$ref": "DiskConfig", -"description": "Optional. Disk option config settings." +"type": "object" }, -"imageUri": { -"description": "Optional. The Compute Engine image resource used for cluster instances.The URI can represent an image or image family.Image examples: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id] projects/[project_id]/global/images/[image-id] image-idImage family examples. Dataproc will use the most recent image from the family: https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name] projects/[project_id]/global/images/family/[custom-image-family-name]If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default.", +"SearchSessionSparkApplicationStagesResponse": { +"description": "A list of stages associated with a Spark Application.", +"id": "SearchSessionSparkApplicationStagesResponse", +"properties": { +"nextPageToken": { +"description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationStages.", "type": "string" }, -"instanceFlexibilityPolicy": { -"$ref": "InstanceFlexibilityPolicy", -"description": "Optional. Instance flexibility Policy allowing a mixture of VM shapes and provisioning models." -}, -"instanceNames": { -"description": "Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group.", +"sparkApplicationStages": { +"description": "Output only. Data corresponding to a stage.", "items": { -"type": "string" +"$ref": "StageData" }, "readOnly": true, "type": "array" +} }, -"instanceReferences": { -"description": "Output only. 
List of references to Compute Engine instances.", +"type": "object" +}, +"SearchSessionSparkApplicationsResponse": { +"description": "A list of summary of Spark Applications", +"id": "SearchSessionSparkApplicationsResponse", +"properties": { +"nextPageToken": { +"description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSessionSparkApplicationsRequest.", +"type": "string" +}, +"sparkApplications": { +"description": "Output only. High level information corresponding to an application.", "items": { -"$ref": "InstanceReference" +"$ref": "SparkApplication" }, "readOnly": true, "type": "array" +} }, -"isPreemptible": { -"description": "Output only. Specifies that this instance group contains preemptible instances.", -"readOnly": true, -"type": "boolean" +"type": "object" }, -"machineTypeUri": { -"description": "Optional. The Compute Engine machine type used for cluster instances.A full URL, partial URI, or short name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2 n1-standard-2Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2.", +"SearchSparkApplicationExecutorStageSummaryResponse": { +"description": "List of Executors associated with a Spark Application Stage.", +"id": "SearchSparkApplicationExecutorStageSummaryResponse", +"properties": { +"nextPageToken": { +"description": "This token is included in the response if there are more results to fetch. 
To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationExecutorsListRequest.", "type": "string" }, -"managedGroupConfig": { -"$ref": "ManagedGroupConfig", -"description": "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", -"readOnly": true -}, -"minCpuPlatform": { -"description": "Optional. Specifies the minimum cpu platform for the Instance Group. See Dataproc -> Minimum CPU Platform (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", -"type": "string" +"sparkApplicationStageExecutors": { +"description": "Details about executors used by the application stage.", +"items": { +"$ref": "ExecutorStageSummary" }, -"minNumInstances": { -"description": "Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster in placed in an ERROR state. The failed VMs are not deleted.", -"format": "int32", -"type": "integer" +"type": "array" +} }, -"numInstances": { -"description": "Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.", -"format": "int32", -"type": "integer" +"type": "object" }, -"preemptibility": { -"description": "Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. 
This default cannot be changed.The default value for secondary instances is PREEMPTIBLE.", -"enum": [ -"PREEMPTIBILITY_UNSPECIFIED", -"NON_PREEMPTIBLE", -"PREEMPTIBLE", -"SPOT" -], -"enumDescriptions": [ -"Preemptibility is unspecified, the system will choose the appropriate setting for each instance group.", -"Instances are non-preemptible.This option is allowed for all instance groups and is the only valid value for Master and Worker instance groups.", -"Instances are preemptible (https://cloud.google.com/compute/docs/instances/preemptible).This option is allowed only for secondary worker (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) groups.", -"Instances are Spot VMs (https://cloud.google.com/compute/docs/instances/spot).This option is allowed only for secondary worker (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) groups. Spot VMs are the latest version of preemptible VMs (https://cloud.google.com/compute/docs/instances/preemptible), and provide additional features." -], +"SearchSparkApplicationExecutorsResponse": { +"description": "List of Executors associated with a Spark Application.", +"id": "SearchSparkApplicationExecutorsResponse", +"properties": { +"nextPageToken": { +"description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationExecutorsListRequest.", "type": "string" }, -"startupConfig": { -"$ref": "StartupConfig", -"description": "Optional. Configuration to handle the startup of instances during cluster create and update process." 
+"sparkApplicationExecutors": { +"description": "Details about executors used by the application.", +"items": { +"$ref": "ExecutorSummary" +}, +"type": "array" } }, "type": "object" }, -"InstanceReference": { -"description": "A reference to a Compute Engine instance.", -"id": "InstanceReference", +"SearchSparkApplicationJobsResponse": { +"description": "A list of Jobs associated with a Spark Application.", +"id": "SearchSparkApplicationJobsResponse", "properties": { -"instanceId": { -"description": "The unique identifier of the Compute Engine instance.", +"nextPageToken": { +"description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationJobsRequest.", "type": "string" }, -"instanceName": { -"description": "The user-friendly name of the Compute Engine instance.", +"sparkApplicationJobs": { +"description": "Output only. Data corresponding to a spark job.", +"items": { +"$ref": "JobData" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"SearchSparkApplicationSqlQueriesResponse": { +"description": "List of all queries for a Spark Application.", +"id": "SearchSparkApplicationSqlQueriesResponse", +"properties": { +"nextPageToken": { +"description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationSqlQueriesRequest.", "type": "string" }, -"publicEciesKey": { -"description": "The public ECIES key used for sharing data with this instance.", -"type": "string" +"sparkApplicationSqlQueries": { +"description": "Output only. 
SQL Execution Data", +"items": { +"$ref": "SqlExecutionUiData" }, -"publicKey": { -"description": "The public RSA key used for sharing data with this instance.", -"type": "string" +"readOnly": true, +"type": "array" } }, "type": "object" }, -"InstanceSelection": { -"description": "Defines machines types and a rank to which the machines types belong.", -"id": "InstanceSelection", +"SearchSparkApplicationStageAttemptTasksResponse": { +"description": "List of tasks for a stage of a Spark Application", +"id": "SearchSparkApplicationStageAttemptTasksResponse", "properties": { -"machineTypes": { -"description": "Optional. Full machine-type names, e.g. \"n1-standard-16\".", -"items": { +"nextPageToken": { +"description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListSparkApplicationStageAttemptTasksRequest.", "type": "string" }, -"type": "array" +"sparkApplicationStageAttemptTasks": { +"description": "Output only. Data corresponding to tasks created by spark.", +"items": { +"$ref": "TaskData" }, -"rank": { -"description": "Optional. Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference.", -"format": "int32", -"type": "integer" +"readOnly": true, +"type": "array" } }, "type": "object" }, -"InstanceSelectionResult": { -"description": "Defines a mapping from machine types to the number of VMs that are created with each machine type.", -"id": "InstanceSelectionResult", +"SearchSparkApplicationStageAttemptsResponse": { +"description": "A list of Stage Attempts for a Stage of a Spark Application.", +"id": "SearchSparkApplicationStageAttemptsResponse", "properties": { -"machineType": { -"description": "Output only. 
Full machine-type names, e.g. \"n1-standard-16\".", -"readOnly": true, +"nextPageToken": { +"description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListSparkApplicationStageAttemptsRequest.", "type": "string" }, -"vmCount": { -"description": "Output only. Number of VM provisioned with the machine_type.", -"format": "int32", +"sparkApplicationStageAttempts": { +"description": "Output only. Data corresponding to a stage attempts", +"items": { +"$ref": "StageData" +}, "readOnly": true, -"type": "integer" +"type": "array" } }, "type": "object" }, -"InstantiateWorkflowTemplateRequest": { -"description": "A request to instantiate a workflow template.", -"id": "InstantiateWorkflowTemplateRequest", +"SearchSparkApplicationStagesResponse": { +"description": "A list of stages associated with a Spark Application.", +"id": "SearchSparkApplicationStagesResponse", "properties": { -"parameters": { -"additionalProperties": { +"nextPageToken": { +"description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationStages.", "type": "string" }, -"description": "Optional. Map from parameter names to values that should be used for those parameters. Values may not exceed 1000 characters.", +"sparkApplicationStages": { +"description": "Output only. Data corresponding to a stage.", +"items": { +"$ref": "StageData" +}, +"readOnly": true, +"type": "array" +} +}, "type": "object" }, -"requestId": { -"description": "Optional. A tag that prevents multiple concurrent workflow instances with the same tag from running. 
This mitigates risk of concurrent instances started due to retries.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"SearchSparkApplicationsResponse": { +"description": "A list of summary of Spark Applications", +"id": "SearchSparkApplicationsResponse", +"properties": { +"nextPageToken": { +"description": "This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent SearchSparkApplicationsRequest.", "type": "string" }, -"version": { -"description": "Optional. The version of workflow template to instantiate. If specified, the workflow will be instantiated only if the current version of the workflow template has the supplied version.This option cannot be used to instantiate a previous version of workflow template.", -"format": "int32", -"type": "integer" +"sparkApplications": { +"description": "Output only. High level information corresponding to an application.", +"items": { +"$ref": "SparkApplication" +}, +"readOnly": true, +"type": "array" } }, "type": "object" }, -"Interval": { -"description": "Represents a time interval, encoded as a Timestamp start (inclusive) and a Timestamp end (exclusive).The start must be less than or equal to the end. When the start equals the end, the interval is empty (matches no time). When both start and end are unspecified, the interval matches any time.", -"id": "Interval", +"SecurityConfig": { +"description": "Security related configuration, including encryption, Kerberos, etc.", +"id": "SecurityConfig", "properties": { -"endTime": { -"description": "Optional. 
Exclusive end of the interval.If specified, a Timestamp matching this interval will have to be before the end.", -"format": "google-datetime", -"type": "string" +"identityConfig": { +"$ref": "IdentityConfig", +"description": "Optional. Identity related configuration, including service account based secure multi-tenancy user mappings." }, -"startTime": { -"description": "Optional. Inclusive start of the interval.If specified, a Timestamp matching this interval will have to be the same or after the start.", -"format": "google-datetime", -"type": "string" +"kerberosConfig": { +"$ref": "KerberosConfig", +"description": "Optional. Kerberos related configuration." } }, "type": "object" }, -"Job": { -"description": "A Dataproc job resource.", -"id": "Job", +"Session": { +"description": "A representation of a session.", +"id": "Session", "properties": { -"done": { -"description": "Output only. Indicates whether the job is completed. If the value is false, the job is still in progress. If true, the job is completed, and status.state field will indicate if it was successful, failed, or cancelled.", +"createTime": { +"description": "Output only. The time when the session was created.", +"format": "google-datetime", "readOnly": true, -"type": "boolean" +"type": "string" }, -"driverControlFilesUri": { -"description": "Output only. If present, the location of miscellaneous control files which can be used as part of job setup and handling. If not present, control files might be placed in the same location as driver_output_uri.", +"creator": { +"description": "Output only. The email address of the user who created the session.", "readOnly": true, "type": "string" }, -"driverOutputResourceUri": { -"description": "Output only. A URI pointing to the location of the stdout of the job's driver program.", -"readOnly": true, +"environmentConfig": { +"$ref": "EnvironmentConfig", +"description": "Optional. Environment configuration for the session execution." 
+}, +"jupyterSession": { +"$ref": "JupyterConfig", +"description": "Optional. Jupyter session config." +}, +"labels": { +"additionalProperties": { "type": "string" }, -"driverSchedulingConfig": { -"$ref": "DriverSchedulingConfig", -"description": "Optional. Driver scheduling configuration." +"description": "Optional. The labels to associate with the session. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.", +"type": "object" }, -"flinkJob": { -"$ref": "FlinkJob", -"description": "Optional. Job is a Flink job." +"name": { +"description": "Required. The resource name of the session.", +"type": "string" }, -"hadoopJob": { -"$ref": "HadoopJob", -"description": "Optional. Job is a Hadoop job." +"runtimeConfig": { +"$ref": "RuntimeConfig", +"description": "Optional. Runtime configuration for the session execution." }, -"hiveJob": { -"$ref": "HiveJob", -"description": "Optional. Job is a Hive job." +"runtimeInfo": { +"$ref": "RuntimeInfo", +"description": "Output only. Runtime information about session execution.", +"readOnly": true }, -"jobUuid": { -"description": "Output only. A UUID that uniquely identifies a job within the project over time. This is in contrast to a user-settable reference.job_id that might be reused over time.", -"readOnly": true, +"sessionTemplate": { +"description": "Optional. 
The session template used by the session.Only resource names, including project ID and location, are valid.Example: * https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id] * projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id]The template must be in the same project and Dataproc region as the session.", "type": "string" }, -"labels": { -"additionalProperties": { +"sparkConnectSession": { +"$ref": "SparkConnectConfig", +"description": "Optional. Spark connect session config." +}, +"state": { +"description": "Output only. A state of the session.", +"enum": [ +"STATE_UNSPECIFIED", +"CREATING", +"ACTIVE", +"TERMINATING", +"TERMINATED", +"FAILED" +], +"enumDescriptions": [ +"The session state is unknown.", +"The session is created prior to running.", +"The session is running.", +"The session is terminating.", +"The session is terminated successfully.", +"The session is no longer running due to an error." +], +"readOnly": true, "type": "string" }, -"description": "Optional. The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.", -"type": "object" +"stateHistory": { +"description": "Output only. Historical state information for the session.", +"items": { +"$ref": "SessionStateHistory" }, -"pigJob": { -"$ref": "PigJob", -"description": "Optional. Job is a Pig job." +"readOnly": true, +"type": "array" }, -"placement": { -"$ref": "JobPlacement", -"description": "Required. Job information, including how, when, and where to run the job." +"stateMessage": { +"description": "Output only. 
Session state details, such as the failure description if the state is FAILED.", +"readOnly": true, +"type": "string" }, -"prestoJob": { -"$ref": "PrestoJob", -"description": "Optional. Job is a Presto job." +"stateTime": { +"description": "Output only. The time when the session entered the current state.", +"format": "google-datetime", +"readOnly": true, +"type": "string" }, -"pysparkJob": { -"$ref": "PySparkJob", -"description": "Optional. Job is a PySpark job." +"user": { +"description": "Optional. The email address of the user who owns the session.", +"type": "string" }, -"reference": { -"$ref": "JobReference", -"description": "Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id." +"uuid": { +"description": "Output only. A session UUID (Unique Universal Identifier). The service generates this value when it creates the session.", +"readOnly": true, +"type": "string" +} }, -"scheduling": { -"$ref": "JobScheduling", -"description": "Optional. Job scheduling configuration." +"type": "object" }, -"sparkJob": { -"$ref": "SparkJob", -"description": "Optional. Job is a Spark job." +"SessionOperationMetadata": { +"description": "Metadata describing the Session operation.", +"id": "SessionOperationMetadata", +"properties": { +"createTime": { +"description": "The time when the operation was created.", +"format": "google-datetime", +"type": "string" }, -"sparkRJob": { -"$ref": "SparkRJob", -"description": "Optional. Job is a SparkR job." +"description": { +"description": "Short description of the operation.", +"type": "string" }, -"sparkSqlJob": { -"$ref": "SparkSqlJob", -"description": "Optional. Job is a SparkSql job." +"doneTime": { +"description": "The time when the operation was finished.", +"format": "google-datetime", +"type": "string" }, -"status": { -"$ref": "JobStatus", -"description": "Output only. 
The job status. Additional application-specific status information might be contained in the type_job and yarn_applications fields.", -"readOnly": true +"labels": { +"additionalProperties": { +"type": "string" }, -"statusHistory": { -"description": "Output only. The previous job status.", -"items": { -"$ref": "JobStatus" +"description": "Labels associated with the operation.", +"type": "object" +}, +"operationType": { +"description": "The operation type.", +"enum": [ +"SESSION_OPERATION_TYPE_UNSPECIFIED", +"CREATE", +"TERMINATE", +"DELETE" +], +"enumDescriptions": [ +"Session operation type is unknown.", +"Create Session operation type.", +"Terminate Session operation type.", +"Delete Session operation type." +], +"type": "string" }, -"readOnly": true, -"type": "array" +"session": { +"description": "Name of the session for the operation.", +"type": "string" }, -"trinoJob": { -"$ref": "TrinoJob", -"description": "Optional. Job is a Trino job." +"sessionUuid": { +"description": "Session UUID for the operation.", +"type": "string" }, -"yarnApplications": { -"description": "Output only. The collection of YARN applications spun up by this job.Beta Feature: This report is available for testing purposes only. It might be changed before final release.", +"warnings": { +"description": "Warnings encountered during operation execution.", "items": { -"$ref": "YarnApplication" +"type": "string" }, -"readOnly": true, "type": "array" } }, "type": "object" }, -"JobMetadata": { -"description": "Job Operation metadata.", -"id": "JobMetadata", +"SessionStateHistory": { +"description": "Historical state information.", +"id": "SessionStateHistory", "properties": { -"jobId": { -"description": "Output only. The job id.", +"state": { +"description": "Output only. 
The state of the session at this point in the session history.", +"enum": [ +"STATE_UNSPECIFIED", +"CREATING", +"ACTIVE", +"TERMINATING", +"TERMINATED", +"FAILED" +], +"enumDescriptions": [ +"The session state is unknown.", +"The session is created prior to running.", +"The session is running.", +"The session is terminating.", +"The session is terminated successfully.", +"The session is no longer running due to an error." +], "readOnly": true, "type": "string" }, -"operationType": { -"description": "Output only. Operation type.", +"stateMessage": { +"description": "Output only. Details about the state at this point in the session history.", "readOnly": true, "type": "string" }, -"startTime": { -"description": "Output only. Job submission time.", +"stateStartTime": { +"description": "Output only. The time when the session entered the historical state.", "format": "google-datetime", "readOnly": true, "type": "string" -}, -"status": { -"$ref": "JobStatus", -"description": "Output only. Most recent job status.", -"readOnly": true } }, "type": "object" }, -"JobPlacement": { -"description": "Dataproc job config.", -"id": "JobPlacement", +"SessionTemplate": { +"description": "A representation of a session template.", +"id": "SessionTemplate", "properties": { -"clusterLabels": { +"createTime": { +"description": "Output only. The time when the template was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"creator": { +"description": "Output only. The email address of the user who created the template.", +"readOnly": true, +"type": "string" +}, +"description": { +"description": "Optional. Brief description of the template.", +"type": "string" +}, +"environmentConfig": { +"$ref": "EnvironmentConfig", +"description": "Optional. Environment configuration for session execution." +}, +"jupyterSession": { +"$ref": "JupyterConfig", +"description": "Optional. Jupyter session config." 
+}, +"labels": { "additionalProperties": { "type": "string" }, -"description": "Optional. Cluster labels to identify a cluster where the job will be submitted.", +"description": "Optional. Labels to associate with sessions created using this template. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if present, must contain 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.", "type": "object" }, -"clusterName": { -"description": "Required. The name of the cluster where the job will be submitted.", +"name": { +"description": "Required. The resource name of the session template.", "type": "string" }, -"clusterUuid": { -"description": "Output only. A cluster UUID generated by the Dataproc service when the job is submitted.", +"runtimeConfig": { +"$ref": "RuntimeConfig", +"description": "Optional. Runtime configuration for session execution." +}, +"sparkConnectSession": { +"$ref": "SparkConnectConfig", +"description": "Optional. Spark connect session config." +}, +"updateTime": { +"description": "Output only. The time the template was last updated.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"uuid": { +"description": "Output only. A session template UUID (Unique Universal Identifier). The service generates this value when it creates the session template.", "readOnly": true, "type": "string" } }, "type": "object" }, -"JobReference": { -"description": "Encapsulates the full scoping used to reference a job.", -"id": "JobReference", +"SetIamPolicyRequest": { +"description": "Request message for SetIamPolicy method.", +"id": "SetIamPolicyRequest", "properties": { -"jobId": { -"description": "Optional. The job ID, which must be unique within the project.The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). 
The maximum length is 100 characters.If not specified by the caller, the job ID will be provided by the server.", -"type": "string" -}, -"projectId": { -"description": "Optional. The ID of the Google Cloud Platform project that the job belongs to. If specified, must match the request project ID.", -"type": "string" +"policy": { +"$ref": "Policy", +"description": "REQUIRED: The complete policy to be applied to the resource. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Google Cloud services (such as Projects) might reject them." } }, "type": "object" }, -"JobScheduling": { -"description": "Job scheduling options.", -"id": "JobScheduling", +"ShieldedInstanceConfig": { +"description": "Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm).", +"id": "ShieldedInstanceConfig", "properties": { -"maxFailuresPerHour": { -"description": "Optional. Maximum number of times per hour a driver can be restarted as a result of driver exiting with non-zero code before job is reported failed.A job might be reported as thrashing if the driver exits with a non-zero code four times within a 10-minute window.Maximum value is 10.Note: This restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).", -"format": "int32", -"type": "integer" +"enableIntegrityMonitoring": { +"description": "Optional. Defines whether instances have integrity monitoring enabled.", +"type": "boolean" }, -"maxFailuresTotal": { -"description": "Optional. Maximum total number of times a driver can be restarted as a result of the driver exiting with a non-zero code. 
After the maximum number is reached, the job will be reported as failed.Maximum value is 240.Note: Currently, this restartable job option is not supported in Dataproc workflow templates (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).", -"format": "int32", -"type": "integer" +"enableSecureBoot": { +"description": "Optional. Defines whether instances have Secure Boot enabled.", +"type": "boolean" +}, +"enableVtpm": { +"description": "Optional. Defines whether instances have the vTPM enabled.", +"type": "boolean" } }, "type": "object" }, -"JobStatus": { -"description": "Dataproc job status.", -"id": "JobStatus", +"ShufflePushReadMetrics": { +"id": "ShufflePushReadMetrics", "properties": { -"details": { -"description": "Optional. Output only. Job state details, such as an error description if the state is ERROR.", -"readOnly": true, +"corruptMergedBlockChunks": { +"format": "int64", "type": "string" }, -"state": { -"description": "Output only. A state message specifying the overall job state.", -"enum": [ -"STATE_UNSPECIFIED", -"PENDING", -"SETUP_DONE", -"RUNNING", -"CANCEL_PENDING", -"CANCEL_STARTED", -"CANCELLED", -"DONE", -"ERROR", -"ATTEMPT_FAILURE" -], -"enumDescriptions": [ -"The job state is unknown.", -"The job is pending; it has been submitted, but is not yet running.", -"Job has been received by the service and completed initial setup; it will soon be submitted to the cluster.", -"The job is running on the cluster.", -"A CancelJob request has been received, but is pending.", -"Transient in-flight resources have been canceled, and the request to cancel the running job has been issued to the cluster.", -"The job cancellation was successful.", -"The job has completed successfully.", -"The job has completed, but encountered an error.", -"Job attempt has failed. The detail field contains failure details for this attempt.Applies to restartable jobs only." 
-], -"readOnly": true, +"localMergedBlocksFetched": { +"format": "int64", "type": "string" }, -"stateStartTime": { -"description": "Output only. The time when this state was entered.", -"format": "google-datetime", -"readOnly": true, +"localMergedBytesRead": { +"format": "int64", "type": "string" }, -"substate": { -"description": "Output only. Additional state information, which includes status reported by the agent.", -"enum": [ -"UNSPECIFIED", -"SUBMITTED", -"QUEUED", -"STALE_STATUS" -], -"enumDescriptions": [ -"The job substate is unknown.", -"The Job is submitted to the agent.Applies to RUNNING state.", -"The Job has been received and is awaiting execution (it might be waiting for a condition to be met). See the \"details\" field for the reason for the delay.Applies to RUNNING state.", -"The agent-reported status is out of date, which can be caused by a loss of communication between the agent and Dataproc. If the agent does not send a timely update, the job will fail.Applies to RUNNING state." -], -"readOnly": true, +"localMergedChunksFetched": { +"format": "int64", "type": "string" -} }, -"type": "object" +"mergedFetchFallbackCount": { +"format": "int64", +"type": "string" }, -"JupyterConfig": { -"description": "Jupyter configuration for an interactive session.", -"id": "JupyterConfig", -"properties": { -"displayName": { -"description": "Optional. Display name, shown in the Jupyter kernelspec card.", +"remoteMergedBlocksFetched": { +"format": "int64", "type": "string" }, -"kernel": { -"description": "Optional. Kernel", -"enum": [ -"KERNEL_UNSPECIFIED", -"PYTHON", -"SCALA" -], -"enumDescriptions": [ -"The kernel is unknown.", -"Python kernel.", -"Scala kernel." 
-], +"remoteMergedBytesRead": { +"format": "int64", +"type": "string" +}, +"remoteMergedChunksFetched": { +"format": "int64", +"type": "string" +}, +"remoteMergedReqsDuration": { +"format": "int64", "type": "string" } }, "type": "object" }, -"KerberosConfig": { -"description": "Specifies Kerberos related configuration.", -"id": "KerberosConfig", +"ShufflePushReadQuantileMetrics": { +"id": "ShufflePushReadQuantileMetrics", "properties": { -"crossRealmTrustAdminServer": { -"description": "Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", -"type": "string" +"corruptMergedBlockChunks": { +"$ref": "Quantiles" +}, +"localMergedBlocksFetched": { +"$ref": "Quantiles" +}, +"localMergedBytesRead": { +"$ref": "Quantiles" +}, +"localMergedChunksFetched": { +"$ref": "Quantiles" }, -"crossRealmTrustKdc": { -"description": "Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", -"type": "string" +"mergedFetchFallbackCount": { +"$ref": "Quantiles" }, -"crossRealmTrustRealm": { -"description": "Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.", -"type": "string" +"remoteMergedBlocksFetched": { +"$ref": "Quantiles" }, -"crossRealmTrustSharedPasswordUri": { -"description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.", -"type": "string" +"remoteMergedBytesRead": { +"$ref": "Quantiles" }, -"enableKerberos": { -"description": "Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.", -"type": "boolean" +"remoteMergedChunksFetched": { +"$ref": "Quantiles" }, -"kdcDbKeyUri": { -"description": "Optional. 
The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.", -"type": "string" +"remoteMergedReqsDuration": { +"$ref": "Quantiles" +} }, -"keyPasswordUri": { -"description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.", -"type": "string" +"type": "object" }, -"keystorePasswordUri": { -"description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.", +"ShuffleReadMetrics": { +"description": "Shuffle data read by the task.", +"id": "ShuffleReadMetrics", +"properties": { +"fetchWaitTimeMillis": { +"format": "int64", "type": "string" }, -"keystoreUri": { -"description": "Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", +"localBlocksFetched": { +"format": "int64", "type": "string" }, -"kmsKeyUri": { -"description": "Optional. The URI of the KMS key used to encrypt sensitive files.", +"localBytesRead": { +"format": "int64", "type": "string" }, -"realm": { -"description": "Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.", +"recordsRead": { +"format": "int64", "type": "string" }, -"rootPrincipalPasswordUri": { -"description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.", +"remoteBlocksFetched": { +"format": "int64", "type": "string" }, -"tgtLifetimeHours": { -"description": "Optional. The lifetime of the ticket granting ticket, in hours. 
If not specified, or user specifies 0, then default value 10 will be used.", -"format": "int32", -"type": "integer" +"remoteBytesRead": { +"format": "int64", +"type": "string" }, -"truststorePasswordUri": { -"description": "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.", +"remoteBytesReadToDisk": { +"format": "int64", "type": "string" }, -"truststoreUri": { -"description": "Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", +"remoteReqsDuration": { +"format": "int64", "type": "string" +}, +"shufflePushReadMetrics": { +"$ref": "ShufflePushReadMetrics" } }, "type": "object" }, -"KubernetesClusterConfig": { -"description": "The configuration for running the Dataproc cluster on Kubernetes.", -"id": "KubernetesClusterConfig", +"ShuffleReadQuantileMetrics": { +"id": "ShuffleReadQuantileMetrics", "properties": { -"gkeClusterConfig": { -"$ref": "GkeClusterConfig", -"description": "Required. The configuration for running the Dataproc cluster on GKE." +"fetchWaitTimeMillis": { +"$ref": "Quantiles" }, -"kubernetesNamespace": { -"description": "Optional. A namespace within the Kubernetes cluster to deploy into. If this namespace does not exist, it is created. If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. If not specified, the name of the Dataproc Cluster is used.", -"type": "string" +"localBlocksFetched": { +"$ref": "Quantiles" }, -"kubernetesSoftwareConfig": { -"$ref": "KubernetesSoftwareConfig", -"description": "Optional. The software configuration for this Dataproc cluster running on Kubernetes." 
+"readBytes": { +"$ref": "Quantiles" +}, +"readRecords": { +"$ref": "Quantiles" +}, +"remoteBlocksFetched": { +"$ref": "Quantiles" +}, +"remoteBytesRead": { +"$ref": "Quantiles" +}, +"remoteBytesReadToDisk": { +"$ref": "Quantiles" +}, +"remoteReqsDuration": { +"$ref": "Quantiles" +}, +"shufflePushReadMetrics": { +"$ref": "ShufflePushReadQuantileMetrics" +}, +"totalBlocksFetched": { +"$ref": "Quantiles" } }, "type": "object" }, -"KubernetesSoftwareConfig": { -"description": "The software configuration for this Dataproc cluster running on Kubernetes.", -"id": "KubernetesSoftwareConfig", +"ShuffleWriteMetrics": { +"description": "Shuffle data written by task.", +"id": "ShuffleWriteMetrics", "properties": { -"componentVersion": { -"additionalProperties": { +"bytesWritten": { +"format": "int64", "type": "string" }, -"description": "The components that should be installed in this Dataproc cluster. The key must be a string from the KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified.", -"type": "object" +"recordsWritten": { +"format": "int64", +"type": "string" }, -"properties": { -"additionalProperties": { +"writeTimeNanos": { +"format": "int64", "type": "string" +} }, -"description": "The properties to set on daemon config files.Property keys are specified in prefix:property format, for example spark:spark.kubernetes.container.image. 
The following are supported prefixes and their mappings: spark: spark-defaults.confFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", "type": "object" +}, +"ShuffleWriteQuantileMetrics": { +"id": "ShuffleWriteQuantileMetrics", +"properties": { +"writeBytes": { +"$ref": "Quantiles" +}, +"writeRecords": { +"$ref": "Quantiles" +}, +"writeTimeNanos": { +"$ref": "Quantiles" } }, "type": "object" }, -"LifecycleConfig": { -"description": "Specifies the cluster auto-delete schedule configuration.", -"id": "LifecycleConfig", +"SinkProgress": { +"id": "SinkProgress", "properties": { -"autoDeleteTime": { -"description": "Optional. The time when cluster will be auto-deleted (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).", -"format": "google-datetime", +"description": { "type": "string" }, -"autoDeleteTtl": { -"description": "Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).", -"format": "google-duration", +"metrics": { +"additionalProperties": { "type": "string" }, -"idleDeleteTtl": { -"description": "Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).", -"format": "google-duration", -"type": "string" +"type": "object" }, -"idleStartTime": { -"description": "Output only. 
The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).", -"format": "google-datetime", -"readOnly": true, +"numOutputRows": { +"format": "int64", "type": "string" } }, "type": "object" }, -"ListAutoscalingPoliciesResponse": { -"description": "A response to a request to list autoscaling policies in a project.", -"id": "ListAutoscalingPoliciesResponse", +"SoftwareConfig": { +"description": "Specifies the selection and config of software inside the cluster.", +"id": "SoftwareConfig", "properties": { -"nextPageToken": { -"description": "Output only. This token is included in the response if there are more results to fetch.", -"readOnly": true, +"imageVersion": { +"description": "Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported-dataproc-image-versions), such as \"1.2\" (including a subminor version, such as \"1.2.29\"), or the \"preview\" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.", "type": "string" }, -"policies": { -"description": "Output only. Autoscaling policies list.", +"optionalComponents": { +"description": "Optional. The set of components to activate on the cluster.", "items": { -"$ref": "AutoscalingPolicy" +"enum": [ +"COMPONENT_UNSPECIFIED", +"ANACONDA", +"DOCKER", +"DRUID", +"FLINK", +"HBASE", +"HIVE_WEBHCAT", +"HUDI", +"JUPYTER", +"PRESTO", +"TRINO", +"RANGER", +"SOLR", +"ZEPPELIN", +"ZOOKEEPER" +], +"enumDescriptions": [ +"Unspecified component. 
Specifying this will cause Cluster creation to fail.", +"The Anaconda component is no longer supported or applicable to supported Dataproc on Compute Engine image versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-version-clusters#supported-dataproc-image-versions). It cannot be activated on clusters created with supported Dataproc on Compute Engine image versions.", +"Docker", +"The Druid query engine. (alpha)", +"Flink", +"HBase. (beta)", +"The Hive Web HCatalog (the REST service for accessing HCatalog).", +"Hudi.", +"The Jupyter Notebook.", +"The Presto query engine.", +"The Trino query engine.", +"The Ranger service.", +"The Solr service.", +"The Zeppelin notebook.", +"The Zookeeper service." +], +"type": "string" }, -"readOnly": true, "type": "array" +}, +"properties": { +"additionalProperties": { +"type": "string" +}, +"description": "Optional. The properties to set on daemon config files.Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf yarn: yarn-site.xmlFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", +"type": "object" } }, "type": "object" }, -"ListBatchesResponse": { -"description": "A list of batch workloads.", -"id": "ListBatchesResponse", +"SourceProgress": { +"id": "SourceProgress", "properties": { -"batches": { -"description": "Output only. The batches from the specified collection.", -"items": { -"$ref": "Batch" +"description": { +"type": "string" }, -"readOnly": true, -"type": "array" +"endOffset": { +"type": "string" }, -"nextPageToken": { -"description": "A token, which can be sent as page_token to retrieve the next page. 
If this field is omitted, there are no subsequent pages.", +"inputRowsPerSecond": { +"format": "double", +"type": "number" +}, +"latestOffset": { "type": "string" }, -"unreachable": { -"description": "Output only. List of Batches that could not be included in the response. Attempting to get one of these resources may indicate why it was not included in the list response.", -"items": { +"metrics": { +"additionalProperties": { "type": "string" }, -"readOnly": true, -"type": "array" +"type": "object" +}, +"numInputRows": { +"format": "int64", +"type": "string" +}, +"processedRowsPerSecond": { +"format": "double", +"type": "number" +}, +"startOffset": { +"type": "string" } }, "type": "object" }, -"ListClustersResponse": { -"description": "The list of all clusters in a project.", -"id": "ListClustersResponse", +"SparkApplication": { +"description": "A summary of Spark Application", +"id": "SparkApplication", "properties": { -"clusters": { -"description": "Output only. The clusters in the project.", -"items": { -"$ref": "Cluster" -}, -"readOnly": true, -"type": "array" +"application": { +"$ref": "ApplicationInfo", +"description": "Output only. High level information corresponding to an application.", +"readOnly": true }, -"nextPageToken": { -"description": "Output only. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListClustersRequest.", -"readOnly": true, +"name": { +"description": "Identifier. Name of the spark application", "type": "string" } }, "type": "object" }, -"ListJobsResponse": { -"description": "A list of jobs in a project.", -"id": "ListJobsResponse", +"SparkBatch": { +"description": "A configuration for running an Apache Spark (https://spark.apache.org/) batch workload.", +"id": "SparkBatch", "properties": { -"jobs": { -"description": "Output only. Jobs list.", +"archiveUris": { +"description": "Optional. 
HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", "items": { -"$ref": "Job" +"type": "string" }, -"readOnly": true, "type": "array" }, -"nextPageToken": { -"description": "Optional. This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListJobsRequest.", -"type": "string" -}, -"unreachable": { -"description": "Output only. List of jobs with kms_key-encrypted parameters that could not be decrypted. A response to a jobs.get request may indicate the reason for the decryption failure for a specific job.", +"args": { +"description": "Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.", "items": { "type": "string" }, -"readOnly": true, "type": "array" -} -}, -"type": "object" }, -"ListOperationsResponse": { -"description": "The response message for Operations.ListOperations.", -"id": "ListOperationsResponse", -"properties": { -"nextPageToken": { -"description": "The standard List next-page token.", +"fileUris": { +"description": "Optional. HCFS URIs of files to be placed in the working directory of each executor.", +"items": { "type": "string" }, -"operations": { -"description": "A list of operations that matches the specified filter in the request.", +"type": "array" +}, +"jarFileUris": { +"description": "Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.", "items": { -"$ref": "Operation" +"type": "string" }, "type": "array" -} -}, -"type": "object" }, -"ListSessionTemplatesResponse": { -"description": "A list of session templates.", -"id": "ListSessionTemplatesResponse", -"properties": { -"nextPageToken": { -"description": "A token, which can be sent as page_token to retrieve the next page. 
If this field is omitted, there are no subsequent pages.", +"mainClass": { +"description": "Optional. The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jar_file_uris.", "type": "string" }, -"sessionTemplates": { -"description": "Output only. Session template list", -"items": { -"$ref": "SessionTemplate" -}, -"readOnly": true, -"type": "array" +"mainJarFileUri": { +"description": "Optional. The HCFS URI of the jar file that contains the main class.", +"type": "string" } }, "type": "object" }, -"ListSessionsResponse": { -"description": "A list of interactive sessions.", -"id": "ListSessionsResponse", +"SparkConnectConfig": { +"description": "Spark connect configuration for an interactive session.", +"id": "SparkConnectConfig", +"properties": {}, +"type": "object" +}, +"SparkHistoryServerConfig": { +"description": "Spark History Server configuration for the workload.", +"id": "SparkHistoryServerConfig", "properties": { -"nextPageToken": { -"description": "A token, which can be sent as page_token, to retrieve the next page. If this field is omitted, there are no subsequent pages.", +"dataprocCluster": { +"description": "Optional. Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.Example: projects/[project_id]/regions/[region]/clusters/[cluster_name]", "type": "string" -}, -"sessions": { -"description": "Output only. The sessions from the specified collection.", -"items": { -"$ref": "Session" -}, -"readOnly": true, -"type": "array" } }, "type": "object" }, -"ListWorkflowTemplatesResponse": { -"description": "A response to a request to list workflow templates in a project.", -"id": "ListWorkflowTemplatesResponse", +"SparkJob": { +"description": "A Dataproc job for running Apache Spark (https://spark.apache.org/) applications on YARN.", +"id": "SparkJob", "properties": { -"nextPageToken": { -"description": "Output only. 
This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent ListWorkflowTemplatesRequest.", -"readOnly": true, +"archiveUris": { +"description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", +"items": { "type": "string" }, -"templates": { -"description": "Output only. WorkflowTemplates list.", +"type": "array" +}, +"args": { +"description": "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", "items": { -"$ref": "WorkflowTemplate" +"type": "string" }, -"readOnly": true, "type": "array" }, -"unreachable": { -"description": "Output only. List of workflow templates that could not be included in the response. Attempting to get one of these resources may indicate why it was not included in the list response.", +"fileUris": { +"description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", "items": { "type": "string" }, -"readOnly": true, "type": "array" -} -}, -"type": "object" }, -"LoggingConfig": { -"description": "The runtime logging config of the job.", -"id": "LoggingConfig", -"properties": { -"driverLogLevels": { -"additionalProperties": { -"enum": [ -"LEVEL_UNSPECIFIED", -"ALL", -"TRACE", -"DEBUG", -"INFO", -"WARN", -"ERROR", -"FATAL", -"OFF" -], -"enumDescriptions": [ -"Level is unspecified. Use default level for log4j.", -"Use ALL level for log4j.", -"Use TRACE level for log4j.", -"Use DEBUG level for log4j.", -"Use INFO level for log4j.", -"Use WARN level for log4j.", -"Use ERROR level for log4j.", -"Use FATAL level for log4j.", -"Turn off log4j." -], +"jarFileUris": { +"description": "Optional. 
HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.", +"items": { "type": "string" }, -"description": "The per-package log levels for the driver. This can include \"root\" package name to configure rootLogger. Examples: - 'com.google = FATAL' - 'root = INFO' - 'org.apache = DEBUG'", -"type": "object" -} +"type": "array" }, -"type": "object" +"loggingConfig": { +"$ref": "LoggingConfig", +"description": "Optional. The runtime log config for job execution." }, -"ManagedCluster": { -"description": "Cluster that is managed by the workflow.", -"id": "ManagedCluster", -"properties": { -"clusterName": { -"description": "Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix.The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.", +"mainClass": { +"description": "The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in SparkJob.jar_file_uris.", "type": "string" }, -"config": { -"$ref": "ClusterConfig", -"description": "Required. The cluster configuration." +"mainJarFileUri": { +"description": "The HCFS URI of the jar file that contains the main class.", +"type": "string" }, -"labels": { +"properties": { "additionalProperties": { "type": "string" }, -"description": "Optional. The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}\\p{N}_-{0,63}No more than 32 labels can be associated with a given cluster.", +"description": "Optional. A mapping of property names to values, used to configure Spark. 
Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", "type": "object" } }, "type": "object" }, -"ManagedGroupConfig": { -"description": "Specifies the resources used to actively manage an instance group.", -"id": "ManagedGroupConfig", +"SparkPlanGraph": { +"description": "A graph used for storing information of an executionPlan of DataFrame.", +"id": "SparkPlanGraph", "properties": { -"instanceGroupManagerName": { -"description": "Output only. The name of the Instance Group Manager for this group.", -"readOnly": true, -"type": "string" +"edges": { +"items": { +"$ref": "SparkPlanGraphEdge" }, -"instanceGroupManagerUri": { -"description": "Output only. The partial URI to the instance group manager for this group. E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm.", -"readOnly": true, -"type": "string" +"type": "array" }, -"instanceTemplateName": { -"description": "Output only. The name of the Instance Template used for the Managed Instance Group.", -"readOnly": true, +"executionId": { +"format": "int64", "type": "string" -} }, -"type": "object" +"nodes": { +"items": { +"$ref": "SparkPlanGraphNodeWrapper" }, -"MetastoreConfig": { -"description": "Specifies a Metastore configuration.", -"id": "MetastoreConfig", -"properties": { -"dataprocMetastoreService": { -"description": "Required. Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[dataproc_region]/services/[service-name]", -"type": "string" +"type": "array" } }, "type": "object" }, -"Metric": { -"description": "A Dataproc custom metric.", -"id": "Metric", +"SparkPlanGraphCluster": { +"description": "Represents a tree of spark plan.", +"id": "SparkPlanGraphCluster", "properties": { -"metricOverrides": { -"description": "Optional. 
Specify one or more Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) to collect for the metric course (for the SPARK metric source (any Spark metric (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be specified).Provide metrics in the following format: METRIC_SOURCE: INSTANCE:GROUP:METRIC Use camelcase as appropriate.Examples: yarn:ResourceManager:QueueMetrics:AppsCompleted spark:driver:DAGScheduler:job.allJobs sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed hiveserver2:JVM:Memory:NonHeapMemoryUsage.used Notes: Only the specified overridden metrics are collected for the metric source. For example, if one or more spark:executive metrics are listed as metric overrides, other SPARK metrics are not collected. The collection of the metrics for other enabled custom metric sources is unaffected. For example, if both SPARK andd YARN metric sources are enabled, and overrides are provided for Spark metrics only, all YARN metrics are collected.", +"desc": { +"type": "string" +}, +"metrics": { "items": { +"$ref": "SqlPlanMetric" +}, +"type": "array" +}, +"name": { "type": "string" }, +"nodes": { +"items": { +"$ref": "SparkPlanGraphNodeWrapper" +}, "type": "array" }, -"metricSource": { -"description": "Required. A standard set of metrics is collected unless metricOverrides are specified for the metric source (see Custom metrics (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics) for more information).", -"enum": [ -"METRIC_SOURCE_UNSPECIFIED", -"MONITORING_AGENT_DEFAULTS", -"HDFS", -"SPARK", -"YARN", -"SPARK_HISTORY_SERVER", -"HIVESERVER2", -"HIVEMETASTORE", -"FLINK" -], -"enumDescriptions": [ -"Required unspecified metric source.", -"Monitoring agent metrics. 
If this source is enabled, Dataproc enables the monitoring agent in Compute Engine, and collects monitoring agent metrics, which are published with an agent.googleapis.com prefix.", -"HDFS metric source.", -"Spark metric source.", -"YARN metric source.", -"Spark History Server metric source.", -"Hiveserver2 metric source.", -"hivemetastore metric source", -"flink metric source" -], +"sparkPlanGraphClusterId": { +"format": "int64", "type": "string" } }, "type": "object" }, -"NamespacedGkeDeploymentTarget": { -"deprecated": true, -"description": "Deprecated. Used only for the deprecated beta. A full, namespace-isolated deployment target for an existing GKE cluster.", -"id": "NamespacedGkeDeploymentTarget", +"SparkPlanGraphEdge": { +"description": "Represents a directed edge in the spark plan tree from child to parent.", +"id": "SparkPlanGraphEdge", "properties": { -"clusterNamespace": { -"description": "Optional. A namespace within the GKE cluster to deploy into.", +"fromId": { +"format": "int64", "type": "string" }, -"targetGkeCluster": { -"description": "Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'", +"toId": { +"format": "int64", "type": "string" } }, "type": "object" }, -"NodeGroup": { -"description": "Dataproc Node Group. The Dataproc NodeGroup resource is not related to the Dataproc NodeGroupAffinity resource.", -"id": "NodeGroup", +"SparkPlanGraphNode": { +"description": "Represents a node in the spark plan tree.", +"id": "SparkPlanGraphNode", "properties": { -"labels": { -"additionalProperties": { +"desc": { "type": "string" }, -"description": "Optional. Node group labels. Label keys must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty. If specified, they must consist of from 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). 
The node group must have no more than 32 labelsn.", -"type": "object" +"metrics": { +"items": { +"$ref": "SqlPlanMetric" +}, +"type": "array" }, "name": { -"description": "The Node group resource name (https://aip.dev/122).", "type": "string" }, -"nodeGroupConfig": { -"$ref": "InstanceGroupConfig", -"description": "Optional. The node group instance group configuration." -}, -"roles": { -"description": "Required. Node group roles.", -"items": { -"enum": [ -"ROLE_UNSPECIFIED", -"DRIVER" -], -"enumDescriptions": [ -"Required unspecified role.", -"Job drivers run on the node pool." -], +"sparkPlanGraphNodeId": { +"format": "int64", "type": "string" +} }, -"type": "array" +"type": "object" +}, +"SparkPlanGraphNodeWrapper": { +"description": "Wrapper user to represent either a node or a cluster.", +"id": "SparkPlanGraphNodeWrapper", +"properties": { +"cluster": { +"$ref": "SparkPlanGraphCluster" +}, +"node": { +"$ref": "SparkPlanGraphNode" } }, "type": "object" }, -"NodeGroupAffinity": { -"description": "Node Group Affinity for clusters using sole-tenant node groups. The Dataproc NodeGroupAffinity resource is not related to the Dataproc NodeGroup resource.", -"id": "NodeGroupAffinity", +"SparkRBatch": { +"description": "A configuration for running an Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) batch workload.", +"id": "SparkRBatch", "properties": { -"nodeGroupUri": { -"description": "Required. The URI of a sole-tenant node group resource (https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on.A full URL, partial URI, or node group name are valid. Examples: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 projects/[project_id]/zones/[zone]/nodeGroups/node-group-1 node-group-1", +"archiveUris": { +"description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. 
Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", +"items": { "type": "string" -} }, -"type": "object" +"type": "array" }, -"NodeGroupOperationMetadata": { -"description": "Metadata describing the node group operation.", -"id": "NodeGroupOperationMetadata", -"properties": { -"clusterUuid": { -"description": "Output only. Cluster UUID associated with the node group operation.", -"readOnly": true, +"args": { +"description": "Optional. The arguments to pass to the Spark driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.", +"items": { "type": "string" }, -"description": { -"description": "Output only. Short description of operation.", -"readOnly": true, -"type": "string" +"type": "array" }, -"labels": { -"additionalProperties": { +"fileUris": { +"description": "Optional. HCFS URIs of files to be placed in the working directory of each executor.", +"items": { "type": "string" }, -"description": "Output only. Labels associated with the operation.", -"readOnly": true, -"type": "object" +"type": "array" }, -"nodeGroupId": { -"description": "Output only. Node group ID for the operation.", -"readOnly": true, +"mainRFileUri": { +"description": "Required. The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.", "type": "string" +} }, -"operationType": { -"description": "The operation type.", -"enum": [ -"NODE_GROUP_OPERATION_TYPE_UNSPECIFIED", -"CREATE", -"UPDATE", -"DELETE", -"RESIZE", -"REPAIR", -"UPDATE_LABELS", -"START", -"STOP" -], -"enumDescriptions": [ -"Node group operation type is unknown.", -"Create node group operation type.", -"Update node group operation type.", -"Delete node group operation type.", -"Resize node group operation type.", -"Repair node group operation type.", -"Update node group label operation type.", -"Start node group operation type.", -"Stop node group operation type." 
-], +"type": "object" +}, +"SparkRJob": { +"description": "A Dataproc job for running Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) applications on YARN.", +"id": "SparkRJob", +"properties": { +"archiveUris": { +"description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", +"items": { "type": "string" }, -"status": { -"$ref": "ClusterOperationStatus", -"description": "Output only. Current operation status.", -"readOnly": true +"type": "array" }, -"statusHistory": { -"description": "Output only. The previous operation status.", +"args": { +"description": "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", "items": { -"$ref": "ClusterOperationStatus" +"type": "string" }, -"readOnly": true, "type": "array" }, -"warnings": { -"description": "Output only. Errors encountered during operation execution.", +"fileUris": { +"description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", "items": { "type": "string" }, -"readOnly": true, "type": "array" -} }, -"type": "object" +"loggingConfig": { +"$ref": "LoggingConfig", +"description": "Optional. The runtime log config for job execution." }, -"NodeInitializationAction": { -"description": "Specifies an executable to run on a fully configured node and a timeout period for executable completion.", -"id": "NodeInitializationAction", -"properties": { -"executableFile": { -"description": "Required. Cloud Storage URI of executable file.", +"mainRFileUri": { +"description": "Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.", "type": "string" }, -"executionTimeout": { -"description": "Optional. Amount of time executable has to complete. 
Default is 10 minutes (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.", -"format": "google-duration", +"properties": { +"additionalProperties": { "type": "string" +}, +"description": "Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", +"type": "object" } }, "type": "object" }, -"NodePool": { -"description": "indicating a list of workers of same type", -"id": "NodePool", +"SparkRuntimeInfo": { +"id": "SparkRuntimeInfo", "properties": { -"id": { -"description": "Required. A unique id of the node pool. Primary and Secondary workers can be specified using special reserved ids PRIMARY_WORKER_POOL and SECONDARY_WORKER_POOL respectively. Aux node pools can be referenced using corresponding pool id.", +"javaHome": { "type": "string" }, -"instanceNames": { -"description": "Name of instances to be repaired. These instances must belong to specified node pool.", -"items": { +"javaVersion": { "type": "string" }, -"type": "array" -}, -"repairAction": { -"description": "Required. Repair action to take on specified resources of the node pool.", -"enum": [ -"REPAIR_ACTION_UNSPECIFIED", -"DELETE" -], -"enumDescriptions": [ -"No action will be taken by default.", -"delete the specified list of nodes." 
-], +"scalaVersion": { "type": "string" } }, "type": "object" }, -"Operation": { -"description": "This resource represents a long-running operation that is the result of a network API call.", -"id": "Operation", +"SparkSqlBatch": { +"description": "A configuration for running Apache Spark SQL (https://spark.apache.org/sql/) queries as a batch workload.", +"id": "SparkSqlBatch", "properties": { -"done": { -"description": "If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.", -"type": "boolean" -}, -"error": { -"$ref": "Status", -"description": "The error result of the operation in case of failure or cancellation." -}, -"metadata": { -"additionalProperties": { -"description": "Properties of the object. Contains field @type with type URL.", -"type": "any" +"jarFileUris": { +"description": "Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.", +"items": { +"type": "string" }, -"description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", -"type": "object" +"type": "array" }, -"name": { -"description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.", +"queryFileUri": { +"description": "Required. The HCFS URI of the script that contains Spark SQL queries to execute.", "type": "string" }, -"response": { +"queryVariables": { "additionalProperties": { -"description": "Properties of the object. Contains field @type with type URL.", -"type": "any" +"type": "string" }, -"description": "The normal, successful response of the operation. 
If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.", +"description": "Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";).", "type": "object" } }, "type": "object" }, -"OrderedJob": { -"description": "A job executed by the workflow.", -"id": "OrderedJob", +"SparkSqlJob": { +"description": "A Dataproc job for running Apache Spark SQL (https://spark.apache.org/sql/) queries.", +"id": "SparkSqlJob", "properties": { -"flinkJob": { -"$ref": "FlinkJob", -"description": "Optional. Job is a Flink job." +"jarFileUris": { +"description": "Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.", +"items": { +"type": "string" }, -"hadoopJob": { -"$ref": "HadoopJob", -"description": "Optional. Job is a Hadoop job." +"type": "array" }, -"hiveJob": { -"$ref": "HiveJob", -"description": "Optional. Job is a Hive job." +"loggingConfig": { +"$ref": "LoggingConfig", +"description": "Optional. The runtime log config for job execution." }, -"labels": { +"properties": { "additionalProperties": { "type": "string" }, -"description": "Optional. The labels to associate with this job.Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}\\p{N}_-{0,63}No more than 32 labels can be associated with a given job.", +"description": "Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. 
Properties that conflict with values set by the Dataproc API might be overwritten.", "type": "object" }, -"pigJob": { -"$ref": "PigJob", -"description": "Optional. Job is a Pig job." -}, -"prerequisiteStepIds": { -"description": "Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow.", -"items": { +"queryFileUri": { +"description": "The HCFS URI of the script that contains SQL queries.", "type": "string" }, -"type": "array" +"queryList": { +"$ref": "QueryList", +"description": "A list of queries." }, -"prestoJob": { -"$ref": "PrestoJob", -"description": "Optional. Job is a Presto job." +"scriptVariables": { +"additionalProperties": { +"type": "string" }, -"pysparkJob": { -"$ref": "PySparkJob", -"description": "Optional. Job is a PySpark job." +"description": "Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";).", +"type": "object" +} }, -"scheduling": { -"$ref": "JobScheduling", -"description": "Optional. Job scheduling configuration." +"type": "object" }, -"sparkJob": { -"$ref": "SparkJob", -"description": "Optional. Job is a Spark job." +"SparkStandaloneAutoscalingConfig": { +"description": "Basic autoscaling configurations for Spark Standalone.", +"id": "SparkStandaloneAutoscalingConfig", +"properties": { +"gracefulDecommissionTimeout": { +"description": "Required. Timeout for Spark graceful decommissioning of spark workers. Specifies the duration to wait for spark worker to complete spark decommissioning tasks before forcefully removing workers. Only applicable to downscaling operations.Bounds: 0s, 1d.", +"format": "google-duration", +"type": "string" }, -"sparkRJob": { -"$ref": "SparkRJob", -"description": "Optional. Job is a SparkR job." +"removeOnlyIdleWorkers": { +"description": "Optional. Remove only idle workers when scaling down cluster", +"type": "boolean" }, -"sparkSqlJob": { -"$ref": "SparkSqlJob", -"description": "Optional. 
Job is a SparkSql job." +"scaleDownFactor": { +"description": "Required. Fraction of required executors to remove from Spark Serverless clusters. A scale-down factor of 1.0 will result in scaling down so that there are no more executors for the Spark Job.(more aggressive scaling). A scale-down factor closer to 0 will result in a smaller magnitude of scaling donw (less aggressive scaling).Bounds: 0.0, 1.0.", +"format": "double", +"type": "number" }, -"stepId": { -"description": "Required. The step id. The id must be unique among all jobs within the template.The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in prerequisiteStepIds field from other steps.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.", -"type": "string" +"scaleDownMinWorkerFraction": { +"description": "Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.", +"format": "double", +"type": "number" }, -"trinoJob": { -"$ref": "TrinoJob", -"description": "Optional. Job is a Trino job." +"scaleUpFactor": { +"description": "Required. Fraction of required workers to add to Spark Standalone clusters. A scale-up factor of 1.0 will result in scaling up so that there are no more required workers for the Spark Job (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.", +"format": "double", +"type": "number" +}, +"scaleUpMinWorkerFraction": { +"description": "Optional. 
Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.", +"format": "double", +"type": "number" } }, "type": "object" }, -"ParameterValidation": { -"description": "Configuration for parameter validation.", -"id": "ParameterValidation", +"SparkWrapperObject": { +"description": "Outer message that contains the data obtained from spark listener, packaged with information that is required to process it.", +"id": "SparkWrapperObject", "properties": { -"regex": { -"$ref": "RegexValidation", -"description": "Validation based on regular expressions." +"appSummary": { +"$ref": "AppSummary" }, -"values": { -"$ref": "ValueValidation", -"description": "Validation based on a list of allowed values." -} +"applicationEnvironmentInfo": { +"$ref": "ApplicationEnvironmentInfo" }, -"type": "object" +"applicationId": { +"description": "Application Id created by Spark.", +"type": "string" }, -"PeripheralsConfig": { -"description": "Auxiliary services configuration for a workload.", -"id": "PeripheralsConfig", -"properties": { -"metastoreService": { -"description": "Optional. Resource name of an existing Dataproc Metastore service.Example: projects/[project_id]/locations/[region]/services/[service_id]", +"applicationInfo": { +"$ref": "ApplicationInfo" +}, +"eventTimestamp": { +"description": "VM Timestamp associated with the data object.", +"format": "google-datetime", "type": "string" }, -"sparkHistoryServerConfig": { -"$ref": "SparkHistoryServerConfig", -"description": "Optional. The Spark History Server configuration for the workload." 
-} +"executorStageSummary": { +"$ref": "ExecutorStageSummary" }, -"type": "object" +"executorSummary": { +"$ref": "ExecutorSummary" }, -"PigJob": { -"description": "A Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN.", -"id": "PigJob", -"properties": { -"continueOnFailure": { -"description": "Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.", -"type": "boolean" +"jobData": { +"$ref": "JobData" }, -"jarFileUris": { -"description": "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.", -"items": { -"type": "string" +"poolData": { +"$ref": "PoolData" }, -"type": "array" +"processSummary": { +"$ref": "ProcessSummary" }, -"loggingConfig": { -"$ref": "LoggingConfig", -"description": "Optional. The runtime log config for job execution." +"rddOperationGraph": { +"$ref": "RddOperationGraph" }, -"properties": { -"additionalProperties": { -"type": "string" +"rddStorageInfo": { +"$ref": "RddStorageInfo" }, -"description": "Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.", -"type": "object" +"resourceProfileInfo": { +"$ref": "ResourceProfileInfo" }, -"queryFileUri": { -"description": "The HCFS URI of the script that contains the Pig queries.", -"type": "string" +"sparkPlanGraph": { +"$ref": "SparkPlanGraph" }, -"queryList": { -"$ref": "QueryList", -"description": "A list of queries." +"speculationStageSummary": { +"$ref": "SpeculationStageSummary" }, -"scriptVariables": { -"additionalProperties": { -"type": "string" +"sqlExecutionUiData": { +"$ref": "SqlExecutionUiData" }, -"description": "Optional. 
Mapping of query variable names to values (equivalent to the Pig command: name=[value]).", -"type": "object" +"stageData": { +"$ref": "StageData" +}, +"streamBlockData": { +"$ref": "StreamBlockData" +}, +"streamingQueryData": { +"$ref": "StreamingQueryData" +}, +"streamingQueryProgress": { +"$ref": "StreamingQueryProgress" +}, +"taskData": { +"$ref": "TaskData" } }, "type": "object" }, -"Policy": { -"description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources.A Policy is a collection of bindings. A binding binds one or more members, or principals, to a single role. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A role is a named list of permissions; each role can be an IAM predefined role or a user-created custom role.For some types of Google Cloud resources, a binding can also specify a condition, which is a logical expression that allows access to a resource only if the expression evaluates to true. A condition can add constraints based on attributes of the request, the resource, or both. 
To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).JSON example: { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time < timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } YAML example: bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 For a description of IAM and its features, see the IAM documentation (https://cloud.google.com/iam/docs/).", -"id": "Policy", +"SpeculationStageSummary": { +"description": "Details of the speculation task when speculative execution is enabled.", +"id": "SpeculationStageSummary", "properties": { -"bindings": { -"description": "Associates a list of members, or principals, with a role. Optionally, may specify a condition that determines how and when the bindings are applied. Each of the bindings must contain at least one principal.The bindings in a Policy can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. 
For example, if the bindings grant 50 different roles to user:alice@example.com, and not to any other principal, then you can add another 1,450 principals to the bindings in the Policy.", -"items": { -"$ref": "Binding" +"numActiveTasks": { +"format": "int32", +"type": "integer" }, -"type": "array" +"numCompletedTasks": { +"format": "int32", +"type": "integer" }, -"etag": { -"description": "etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the etag in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An etag is returned in the response to getIamPolicy, and systems are expected to put that etag in the request to setIamPolicy to ensure that their change will be applied to the same version of the policy.Important: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.", -"format": "byte", -"type": "string" +"numFailedTasks": { +"format": "int32", +"type": "integer" }, -"version": { -"description": "Specifies the format of the policy.Valid values are 0, 1, and 3. Requests that specify an invalid value are rejected.Any operation that affects conditional role bindings must specify version 3. This requirement applies to the following operations: Getting a policy that includes a conditional role binding Adding a conditional role binding to a policy Changing a conditional role binding in a policy Removing any role binding, with or without a condition, from a policy that includes conditionsImportant: If you use IAM Conditions, you must include the etag field whenever you call setIamPolicy. 
If you omit this field, then IAM allows you to overwrite a version 3 policy with a version 1 policy, and all of the conditions in the version 3 policy are lost.If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset.To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).", +"numKilledTasks": { +"format": "int32", +"type": "integer" +}, +"numTasks": { +"format": "int32", +"type": "integer" +}, +"stageAttemptId": { "format": "int32", "type": "integer" +}, +"stageId": { +"format": "int64", +"type": "string" } }, "type": "object" }, -"PrestoJob": { -"description": "A Dataproc job for running Presto (https://prestosql.io/) queries. IMPORTANT: The Dataproc Presto Optional Component (https://cloud.google.com/dataproc/docs/concepts/components/presto) must be enabled when the cluster is created to submit a Presto job to the cluster.", -"id": "PrestoJob", +"SqlExecutionUiData": { +"description": "SQL Execution Data", +"id": "SqlExecutionUiData", "properties": { -"clientTags": { -"description": "Optional. Presto client tags to attach to this query", -"items": { +"completionTime": { +"format": "google-datetime", "type": "string" }, -"type": "array" +"description": { +"type": "string" }, -"continueOnFailure": { -"description": "Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.", -"type": "boolean" +"details": { +"type": "string" }, -"loggingConfig": { -"$ref": "LoggingConfig", -"description": "Optional. The runtime log config for job execution." +"errorMessage": { +"type": "string" }, -"outputFormat": { -"description": "Optional. The format in which query output will be displayed. 
See the Presto documentation for supported output formats", +"executionId": { +"format": "int64", "type": "string" }, -"properties": { +"jobs": { "additionalProperties": { +"enum": [ +"JOB_EXECUTION_STATUS_UNSPECIFIED", +"JOB_EXECUTION_STATUS_RUNNING", +"JOB_EXECUTION_STATUS_SUCCEEDED", +"JOB_EXECUTION_STATUS_FAILED", +"JOB_EXECUTION_STATUS_UNKNOWN" +], +"enumDescriptions": [ +"", +"", +"", +"", +"" +], "type": "string" }, -"description": "Optional. A mapping of property names to values. Used to set Presto session properties (https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI", "type": "object" }, -"queryFileUri": { -"description": "The HCFS URI of the script that contains SQL queries.", +"metricValues": { +"additionalProperties": { "type": "string" }, -"queryList": { -"$ref": "QueryList", -"description": "A list of queries." -} -}, "type": "object" }, -"PyPiRepositoryConfig": { -"description": "Configuration for PyPi repository", -"id": "PyPiRepositoryConfig", -"properties": { -"pypiRepository": { -"description": "Optional. PyPi repository address", -"type": "string" -} -}, -"type": "object" +"metricValuesIsNull": { +"type": "boolean" }, -"PySparkBatch": { -"description": "A configuration for running an Apache PySpark (https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html) batch workload.", -"id": "PySparkBatch", -"properties": { -"archiveUris": { -"description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", +"metrics": { "items": { -"type": "string" +"$ref": "SqlPlanMetric" }, "type": "array" }, -"args": { -"description": "Optional. The arguments to pass to the driver. 
Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.", -"items": { +"modifiedConfigs": { +"additionalProperties": { "type": "string" }, -"type": "array" +"type": "object" }, -"fileUris": { -"description": "Optional. HCFS URIs of files to be placed in the working directory of each executor.", -"items": { +"physicalPlanDescription": { "type": "string" }, -"type": "array" +"rootExecutionId": { +"format": "int64", +"type": "string" }, -"jarFileUris": { -"description": "Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.", +"stages": { "items": { +"format": "int64", "type": "string" }, "type": "array" }, -"mainPythonFileUri": { -"description": "Required. The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.", +"submissionTime": { +"format": "google-datetime", "type": "string" +} }, -"pythonFileUris": { -"description": "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", -"items": { +"type": "object" +}, +"SqlPlanMetric": { +"description": "Metrics related to SQL execution.", +"id": "SqlPlanMetric", +"properties": { +"accumulatorId": { +"format": "int64", "type": "string" }, -"type": "array" +"metricType": { +"type": "string" +}, +"name": { +"type": "string" } }, "type": "object" }, -"PySparkJob": { -"description": "A Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.", -"id": "PySparkJob", +"StageAttemptTasksSummary": { +"description": "Data related to tasks summary for a Spark Stage Attempt", +"id": "StageAttemptTasksSummary", "properties": { -"archiveUris": { -"description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. 
Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", -"items": { +"applicationId": { "type": "string" }, -"type": "array" +"numFailedTasks": { +"format": "int32", +"type": "integer" }, -"args": { -"description": "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", -"items": { -"type": "string" +"numKilledTasks": { +"format": "int32", +"type": "integer" }, -"type": "array" +"numPendingTasks": { +"format": "int32", +"type": "integer" }, -"fileUris": { -"description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", -"items": { +"numRunningTasks": { +"format": "int32", +"type": "integer" +}, +"numSuccessTasks": { +"format": "int32", +"type": "integer" +}, +"numTasks": { +"format": "int32", +"type": "integer" +}, +"stageAttemptId": { +"format": "int32", +"type": "integer" +}, +"stageId": { +"format": "int64", "type": "string" +} }, -"type": "array" +"type": "object" }, -"jarFileUris": { -"description": "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", +"StageData": { +"description": "Data corresponding to a stage.", +"id": "StageData", +"properties": { +"accumulatorUpdates": { "items": { -"type": "string" +"$ref": "AccumulableInfo" }, "type": "array" }, -"loggingConfig": { -"$ref": "LoggingConfig", -"description": "Optional. The runtime log config for job execution." +"completionTime": { +"format": "google-datetime", +"type": "string" }, -"mainPythonFileUri": { -"description": "Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.", +"description": { "type": "string" }, -"properties": { -"additionalProperties": { +"details": { "type": "string" }, -"description": "Optional. A mapping of property names to values, used to configure PySpark. 
Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", +"executorMetricsDistributions": { +"$ref": "ExecutorMetricsDistributions" +}, +"executorSummary": { +"additionalProperties": { +"$ref": "ExecutorStageSummary" +}, "type": "object" }, -"pythonFileUris": { -"description": "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", -"items": { +"failureReason": { "type": "string" }, -"type": "array" -} -}, -"type": "object" +"firstTaskLaunchedTime": { +"format": "google-datetime", +"type": "string" }, -"QueryList": { -"description": "A list of queries to run on a cluster.", -"id": "QueryList", -"properties": { -"queries": { -"description": "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } } ", +"isShufflePushEnabled": { +"type": "boolean" +}, +"jobIds": { "items": { +"format": "int64", "type": "string" }, "type": "array" -} +}, +"killedTasksSummary": { +"additionalProperties": { +"format": "int32", +"type": "integer" }, "type": "object" }, -"RegexValidation": { -"description": "Validation based on regular expressions.", -"id": "RegexValidation", -"properties": { -"regexes": { -"description": "Required. RE2 regular expressions used to validate the parameter's value. 
The value must match the regex in its entirety (substring matches are not sufficient).", -"items": { +"locality": { +"additionalProperties": { +"format": "int64", "type": "string" }, -"type": "array" -} -}, "type": "object" }, -"RepairClusterRequest": { -"description": "A request to repair a cluster.", -"id": "RepairClusterRequest", -"properties": { -"clusterUuid": { -"description": "Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist.", +"name": { "type": "string" }, -"gracefulDecommissionTimeout": { -"description": "Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning facilitates the removal of cluster nodes without interrupting jobs in progress. The timeout specifies the amount of time to wait for jobs finish before forcefully removing nodes. The default timeout is 0 for forceful decommissioning, and the maximum timeout period is 1 day. (see JSON Mapping\u2014Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).graceful_decommission_timeout is supported in Dataproc image versions 1.2+.", -"format": "google-duration", -"type": "string" +"numActiveTasks": { +"format": "int32", +"type": "integer" }, -"nodePools": { -"description": "Optional. Node pools and corresponding repair action to be taken. All node pools should be unique in this request. i.e. Multiple entries for the same node pool id are not allowed.", -"items": { -"$ref": "NodePool" +"numCompleteTasks": { +"format": "int32", +"type": "integer" }, -"type": "array" +"numCompletedIndices": { +"format": "int32", +"type": "integer" }, -"parentOperationId": { -"description": "Optional. operation id of the parent operation sending the repair request", -"type": "string" +"numFailedTasks": { +"format": "int32", +"type": "integer" }, -"requestId": { -"description": "Optional. A unique ID used to identify the request. 
If the server receives two RepairClusterRequests with the same ID, the second request is ignored, and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", -"type": "string" -} +"numKilledTasks": { +"format": "int32", +"type": "integer" }, -"type": "object" +"numTasks": { +"format": "int32", +"type": "integer" }, -"RepairNodeGroupRequest": { -"id": "RepairNodeGroupRequest", -"properties": { -"instanceNames": { -"description": "Required. Name of instances to be repaired. These instances must belong to specified node pool.", +"parentStageIds": { "items": { +"format": "int64", "type": "string" }, "type": "array" }, -"repairAction": { -"description": "Required. Repair action to take on specified resources of the node pool.", -"enum": [ -"REPAIR_ACTION_UNSPECIFIED", -"REPLACE" -], -"enumDescriptions": [ -"No action will be taken by default.", -"replace the specified list of nodes." -], +"peakExecutorMetrics": { +"$ref": "ExecutorMetrics" +}, +"rddIds": { +"items": { +"format": "int64", "type": "string" }, -"requestId": { -"description": "Optional. A unique ID used to identify the request. If the server receives two RepairNodeGroupRequest with the same ID, the second request is ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). 
The maximum length is 40 characters.", +"type": "array" +}, +"resourceProfileId": { +"format": "int32", +"type": "integer" +}, +"schedulingPool": { "type": "string" -} }, -"type": "object" +"shuffleMergersCount": { +"format": "int32", +"type": "integer" }, -"RepositoryConfig": { -"description": "Configuration for dependency repositories", -"id": "RepositoryConfig", -"properties": { -"pypiRepositoryConfig": { -"$ref": "PyPiRepositoryConfig", -"description": "Optional. Configuration for PyPi repository." -} +"speculationSummary": { +"$ref": "SpeculationStageSummary" }, -"type": "object" +"stageAttemptId": { +"format": "int32", +"type": "integer" }, -"ReservationAffinity": { -"description": "Reservation Affinity for consuming Zonal reservation.", -"id": "ReservationAffinity", -"properties": { -"consumeReservationType": { -"description": "Optional. Type of reservation to consume", +"stageId": { +"format": "int64", +"type": "string" +}, +"stageMetrics": { +"$ref": "StageMetrics" +}, +"status": { "enum": [ -"TYPE_UNSPECIFIED", -"NO_RESERVATION", -"ANY_RESERVATION", -"SPECIFIC_RESERVATION" +"STAGE_STATUS_UNSPECIFIED", +"STAGE_STATUS_ACTIVE", +"STAGE_STATUS_COMPLETE", +"STAGE_STATUS_FAILED", +"STAGE_STATUS_PENDING", +"STAGE_STATUS_SKIPPED" ], "enumDescriptions": [ "", -"Do not consume from any allocated capacity.", -"Consume any reservation available.", -"Must consume from a specific reservation. Must specify key value fields for specifying the reservations." +"", +"", +"", +"", +"" ], "type": "string" }, -"key": { -"description": "Optional. Corresponds to the label key of reservation resource.", +"submissionTime": { +"format": "google-datetime", "type": "string" }, -"values": { -"description": "Optional. Corresponds to the label values of reservation resource.", -"items": { -"type": "string" +"taskQuantileMetrics": { +"$ref": "TaskQuantileMetrics", +"description": "Summary metrics fields. 
These are included in response only if present in summary_metrics_mask field in request" }, -"type": "array" +"tasks": { +"additionalProperties": { +"$ref": "TaskData" +}, +"type": "object" } }, "type": "object" }, -"ResizeNodeGroupRequest": { -"description": "A request to resize a node group.", -"id": "ResizeNodeGroupRequest", +"StageInputMetrics": { +"description": "Metrics about the input read by the stage.", +"id": "StageInputMetrics", "properties": { -"gracefulDecommissionTimeout": { -"description": "Optional. Timeout for graceful YARN decommissioning. Graceful decommissioning (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters#graceful_decommissioning) allows the removal of nodes from the Compute Engine node group without interrupting jobs in progress. This timeout specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum allowed timeout is 1 day. (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)).Only supported on Dataproc image versions 1.2 and higher.", -"format": "google-duration", -"type": "string" -}, -"parentOperationId": { -"description": "Optional. operation id of the parent operation sending the resize request", +"bytesRead": { +"format": "int64", "type": "string" }, -"requestId": { -"description": "Optional. A unique ID used to identify the request. 
If the server receives two ResizeNodeGroupRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.ResizeNodeGroupRequests) with the same ID, the second request is ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"recordsRead": { +"format": "int64", "type": "string" -}, -"size": { -"description": "Required. The number of running instances for the node group to maintain. The group adds or removes instances to maintain the number of instances specified by this parameter.", -"format": "int32", -"type": "integer" } }, "type": "object" }, -"RuntimeConfig": { -"description": "Runtime configuration for a workload.", -"id": "RuntimeConfig", +"StageMetrics": { +"description": "Stage Level Aggregated Metrics", +"id": "StageMetrics", "properties": { -"autotuningConfig": { -"$ref": "AutotuningConfig", -"description": "Optional. Autotuning configuration of the workload." -}, -"cohort": { -"description": "Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs.", +"diskBytesSpilled": { +"format": "int64", "type": "string" }, -"containerImage": { -"description": "Optional. Optional custom container image for the job runtime environment. If not specified, a default container image will be used.", +"executorCpuTimeNanos": { +"format": "int64", "type": "string" }, -"properties": { -"additionalProperties": { +"executorDeserializeCpuTimeNanos": { +"format": "int64", "type": "string" }, -"description": "Optional. A mapping of property names to values, which are used to configure workload execution.", -"type": "object" -}, -"repositoryConfig": { -"$ref": "RepositoryConfig", -"description": "Optional. 
Dependency repository configuration." +"executorDeserializeTimeMillis": { +"format": "int64", +"type": "string" }, -"version": { -"description": "Optional. Version of the batch runtime.", +"executorRunTimeMillis": { +"format": "int64", "type": "string" -} }, -"type": "object" +"jvmGcTimeMillis": { +"format": "int64", +"type": "string" }, -"RuntimeInfo": { -"description": "Runtime information about workload execution.", -"id": "RuntimeInfo", -"properties": { -"approximateUsage": { -"$ref": "UsageMetrics", -"description": "Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments).", -"readOnly": true +"memoryBytesSpilled": { +"format": "int64", +"type": "string" }, -"currentUsage": { -"$ref": "UsageSnapshot", -"description": "Output only. Snapshot of current workload resource usage.", -"readOnly": true +"peakExecutionMemoryBytes": { +"format": "int64", +"type": "string" }, -"diagnosticOutputUri": { -"description": "Output only. A URI pointing to the location of the diagnostics tarball.", -"readOnly": true, +"resultSerializationTimeMillis": { +"format": "int64", "type": "string" }, -"endpoints": { -"additionalProperties": { +"resultSize": { +"format": "int64", "type": "string" }, -"description": "Output only. Map of remote access endpoints (such as web interfaces and APIs) to their URIs.", -"readOnly": true, -"type": "object" +"stageInputMetrics": { +"$ref": "StageInputMetrics" }, -"outputUri": { -"description": "Output only. 
A URI pointing to the location of the stdout and stderr of the workload.", -"readOnly": true, -"type": "string" +"stageOutputMetrics": { +"$ref": "StageOutputMetrics" +}, +"stageShuffleReadMetrics": { +"$ref": "StageShuffleReadMetrics" +}, +"stageShuffleWriteMetrics": { +"$ref": "StageShuffleWriteMetrics" } }, "type": "object" }, -"SecurityConfig": { -"description": "Security related configuration, including encryption, Kerberos, etc.", -"id": "SecurityConfig", +"StageOutputMetrics": { +"description": "Metrics about the output written by the stage.", +"id": "StageOutputMetrics", "properties": { -"identityConfig": { -"$ref": "IdentityConfig", -"description": "Optional. Identity related configuration, including service account based secure multi-tenancy user mappings." +"bytesWritten": { +"format": "int64", +"type": "string" }, -"kerberosConfig": { -"$ref": "KerberosConfig", -"description": "Optional. Kerberos related configuration." +"recordsWritten": { +"format": "int64", +"type": "string" } }, "type": "object" }, -"Session": { -"description": "A representation of a session.", -"id": "Session", +"StageShufflePushReadMetrics": { +"id": "StageShufflePushReadMetrics", "properties": { -"createTime": { -"description": "Output only. The time when the session was created.", -"format": "google-datetime", -"readOnly": true, +"corruptMergedBlockChunks": { +"format": "int64", "type": "string" }, -"creator": { -"description": "Output only. The email address of the user who created the session.", -"readOnly": true, +"localMergedBlocksFetched": { +"format": "int64", "type": "string" }, -"environmentConfig": { -"$ref": "EnvironmentConfig", -"description": "Optional. Environment configuration for the session execution." +"localMergedBytesRead": { +"format": "int64", +"type": "string" }, -"jupyterSession": { -"$ref": "JupyterConfig", -"description": "Optional. Jupyter session config." 
+"localMergedChunksFetched": { +"format": "int64", +"type": "string" }, -"labels": { -"additionalProperties": { +"mergedFetchFallbackCount": { +"format": "int64", "type": "string" }, -"description": "Optional. The labels to associate with the session. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.", -"type": "object" +"remoteMergedBlocksFetched": { +"format": "int64", +"type": "string" +}, +"remoteMergedBytesRead": { +"format": "int64", +"type": "string" }, -"name": { -"description": "Required. The resource name of the session.", +"remoteMergedChunksFetched": { +"format": "int64", "type": "string" }, -"runtimeConfig": { -"$ref": "RuntimeConfig", -"description": "Optional. Runtime configuration for the session execution." +"remoteMergedReqsDuration": { +"format": "int64", +"type": "string" +} }, -"runtimeInfo": { -"$ref": "RuntimeInfo", -"description": "Output only. Runtime information about session execution.", -"readOnly": true +"type": "object" }, -"sessionTemplate": { -"description": "Optional. The session template used by the session.Only resource names, including project ID and location, are valid.Example: * https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id] * projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id]The template must be in the same project and Dataproc region as the session.", +"StageShuffleReadMetrics": { +"description": "Shuffle data read for the stage.", +"id": "StageShuffleReadMetrics", +"properties": { +"bytesRead": { +"format": "int64", "type": "string" }, -"sparkConnectSession": { -"$ref": "SparkConnectConfig", -"description": "Optional. Spark connect session config." 
+"fetchWaitTimeMillis": { +"format": "int64", +"type": "string" }, -"state": { -"description": "Output only. A state of the session.", -"enum": [ -"STATE_UNSPECIFIED", -"CREATING", -"ACTIVE", -"TERMINATING", -"TERMINATED", -"FAILED" -], -"enumDescriptions": [ -"The session state is unknown.", -"The session is created prior to running.", -"The session is running.", -"The session is terminating.", -"The session is terminated successfully.", -"The session is no longer running due to an error." -], -"readOnly": true, +"localBlocksFetched": { +"format": "int64", "type": "string" }, -"stateHistory": { -"description": "Output only. Historical state information for the session.", -"items": { -"$ref": "SessionStateHistory" +"localBytesRead": { +"format": "int64", +"type": "string" }, -"readOnly": true, -"type": "array" +"recordsRead": { +"format": "int64", +"type": "string" }, -"stateMessage": { -"description": "Output only. Session state details, such as the failure description if the state is FAILED.", -"readOnly": true, +"remoteBlocksFetched": { +"format": "int64", "type": "string" }, -"stateTime": { -"description": "Output only. The time when the session entered the current state.", -"format": "google-datetime", -"readOnly": true, +"remoteBytesRead": { +"format": "int64", "type": "string" }, -"user": { -"description": "Optional. The email address of the user who owns the session.", +"remoteBytesReadToDisk": { +"format": "int64", "type": "string" }, -"uuid": { -"description": "Output only. A session UUID (Unique Universal Identifier). 
The service generates this value when it creates the session.", -"readOnly": true, +"remoteReqsDuration": { +"format": "int64", "type": "string" +}, +"stageShufflePushReadMetrics": { +"$ref": "StageShufflePushReadMetrics" } }, "type": "object" }, -"SessionOperationMetadata": { -"description": "Metadata describing the Session operation.", -"id": "SessionOperationMetadata", +"StageShuffleWriteMetrics": { +"description": "Shuffle data written for the stage.", +"id": "StageShuffleWriteMetrics", "properties": { -"createTime": { -"description": "The time when the operation was created.", -"format": "google-datetime", -"type": "string" -}, -"description": { -"description": "Short description of the operation.", +"bytesWritten": { +"format": "int64", "type": "string" }, -"doneTime": { -"description": "The time when the operation was finished.", -"format": "google-datetime", +"recordsWritten": { +"format": "int64", "type": "string" }, -"labels": { -"additionalProperties": { +"writeTimeNanos": { +"format": "int64", "type": "string" +} }, -"description": "Labels associated with the operation.", "type": "object" }, -"operationType": { -"description": "The operation type.", -"enum": [ -"SESSION_OPERATION_TYPE_UNSPECIFIED", -"CREATE", -"TERMINATE", -"DELETE" -], -"enumDescriptions": [ -"Session operation type is unknown.", -"Create Session operation type.", -"Terminate Session operation type.", -"Delete Session operation type." 
-], +"StagesSummary": { +"description": "Data related to Stages page summary", +"id": "StagesSummary", +"properties": { +"applicationId": { "type": "string" }, -"session": { -"description": "Name of the session for the operation.", -"type": "string" +"numActiveStages": { +"format": "int32", +"type": "integer" }, -"sessionUuid": { -"description": "Session UUID for the operation.", +"numCompletedStages": { +"format": "int32", +"type": "integer" +}, +"numFailedStages": { +"format": "int32", +"type": "integer" +}, +"numPendingStages": { +"format": "int32", +"type": "integer" +}, +"numSkippedStages": { +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"StartClusterRequest": { +"description": "A request to start a cluster.", +"id": "StartClusterRequest", +"properties": { +"clusterUuid": { +"description": "Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist.", "type": "string" }, -"warnings": { -"description": "Warnings encountered during operation execution.", -"items": { +"requestId": { +"description": "Optional. A unique ID used to identify the request. If the server receives two StartClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). 
The maximum length is 40 characters.", "type": "string" +} }, -"type": "array" +"type": "object" +}, +"StartupConfig": { +"description": "Configuration to handle the startup of instances during cluster create and update process.", +"id": "StartupConfig", +"properties": { +"requiredRegistrationFraction": { +"description": "Optional. The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled).", +"format": "double", +"type": "number" } }, "type": "object" }, -"SessionStateHistory": { +"StateHistory": { "description": "Historical state information.", -"id": "SessionStateHistory", +"id": "StateHistory", "properties": { "state": { -"description": "Output only. The state of the session at this point in the session history.", +"description": "Output only. The state of the batch at this point in history.", "enum": [ "STATE_UNSPECIFIED", -"CREATING", -"ACTIVE", -"TERMINATING", -"TERMINATED", +"PENDING", +"RUNNING", +"CANCELLING", +"CANCELLED", +"SUCCEEDED", "FAILED" ], "enumDescriptions": [ -"The session state is unknown.", -"The session is created prior to running.", -"The session is running.", -"The session is terminating.", -"The session is terminated successfully.", -"The session is no longer running due to an error." +"The batch state is unknown.", +"The batch is created before running.", +"The batch is running.", +"The batch is cancelling.", +"The batch cancellation was successful.", +"The batch completed successfully.", +"The batch is no longer running due to an error." ], "readOnly": true, "type": "string" }, "stateMessage": { -"description": "Output only. 
Details about the state at this point in the session history.", +"description": "Output only. Details about the state at this point in history.", "readOnly": true, "type": "string" }, "stateStartTime": { -"description": "Output only. The time when the session entered the historical state.", +"description": "Output only. The time when the batch entered the historical state.", "format": "google-datetime", "readOnly": true, "type": "string" @@ -6534,565 +11652,569 @@ }, "type": "object" }, -"SessionTemplate": { -"description": "A representation of a session template.", -"id": "SessionTemplate", +"StateOperatorProgress": { +"id": "StateOperatorProgress", "properties": { -"createTime": { -"description": "Output only. The time when the template was created.", -"format": "google-datetime", -"readOnly": true, +"allRemovalsTimeMs": { +"format": "int64", "type": "string" }, -"creator": { -"description": "Output only. The email address of the user who created the template.", -"readOnly": true, +"allUpdatesTimeMs": { +"format": "int64", "type": "string" }, -"description": { -"description": "Optional. Brief description of the template.", +"commitTimeMs": { +"format": "int64", "type": "string" }, -"environmentConfig": { -"$ref": "EnvironmentConfig", -"description": "Optional. Environment configuration for session execution." -}, -"jupyterSession": { -"$ref": "JupyterConfig", -"description": "Optional. Jupyter session config." -}, -"labels": { +"customMetrics": { "additionalProperties": { +"format": "int64", "type": "string" }, -"description": "Optional. Labels to associate with sessions created using this template. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values can be empty, but, if present, must contain 1 to 63 characters and conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a session.", "type": "object" }, -"name": { -"description": "Required. 
The resource name of the session template.", +"memoryUsedBytes": { +"format": "int64", "type": "string" }, -"runtimeConfig": { -"$ref": "RuntimeConfig", -"description": "Optional. Runtime configuration for session execution." +"numRowsDroppedByWatermark": { +"format": "int64", +"type": "string" }, -"sparkConnectSession": { -"$ref": "SparkConnectConfig", -"description": "Optional. Spark connect session config." +"numRowsRemoved": { +"format": "int64", +"type": "string" +}, +"numRowsTotal": { +"format": "int64", +"type": "string" +}, +"numRowsUpdated": { +"format": "int64", +"type": "string" +}, +"numShufflePartitions": { +"format": "int64", +"type": "string" +}, +"numStateStoreInstances": { +"format": "int64", +"type": "string" +}, +"operatorName": { +"type": "string" +} +}, +"type": "object" +}, +"Status": { +"description": "The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors).", +"id": "Status", +"properties": { +"code": { +"description": "The status code, which should be an enum value of google.rpc.Code.", +"format": "int32", +"type": "integer" +}, +"details": { +"description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", +"items": { +"additionalProperties": { +"description": "Properties of the object. Contains field @type with type URL.", +"type": "any" +}, +"type": "object" }, -"updateTime": { -"description": "Output only. The time the template was last updated.", -"format": "google-datetime", -"readOnly": true, -"type": "string" +"type": "array" }, -"uuid": { -"description": "Output only. 
A session template UUID (Unique Universal Identifier). The service generates this value when it creates the session template.", -"readOnly": true, +"message": { +"description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", "type": "string" } }, "type": "object" }, -"SetIamPolicyRequest": { -"description": "Request message for SetIamPolicy method.", -"id": "SetIamPolicyRequest", +"StopClusterRequest": { +"description": "A request to stop a cluster.", +"id": "StopClusterRequest", "properties": { -"policy": { -"$ref": "Policy", -"description": "REQUIRED: The complete policy to be applied to the resource. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Google Cloud services (such as Projects) might reject them." +"clusterUuid": { +"description": "Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist.", +"type": "string" +}, +"requestId": { +"description": "Optional. A unique ID used to identify the request. If the server receives two StopClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). 
The maximum length is 40 characters.", +"type": "string" } }, "type": "object" }, -"ShieldedInstanceConfig": { -"description": "Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm).", -"id": "ShieldedInstanceConfig", +"StreamBlockData": { +"description": "Stream Block Data.", +"id": "StreamBlockData", "properties": { -"enableIntegrityMonitoring": { -"description": "Optional. Defines whether instances have integrity monitoring enabled.", +"deserialized": { "type": "boolean" }, -"enableSecureBoot": { -"description": "Optional. Defines whether instances have Secure Boot enabled.", -"type": "boolean" -}, -"enableVtpm": { -"description": "Optional. Defines whether instances have the vTPM enabled.", -"type": "boolean" -} +"diskSize": { +"format": "int64", +"type": "string" }, -"type": "object" +"executorId": { +"type": "string" }, -"SoftwareConfig": { -"description": "Specifies the selection and config of software inside the cluster.", -"id": "SoftwareConfig", -"properties": { -"imageVersion": { -"description": "Optional. The version of software inside the cluster. It must be one of the supported Dataproc Versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported-dataproc-image-versions), such as \"1.2\" (including a subminor version, such as \"1.2.29\"), or the \"preview\" version (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.", +"hostPort": { "type": "string" }, -"optionalComponents": { -"description": "Optional. The set of components to activate on the cluster.", -"items": { -"enum": [ -"COMPONENT_UNSPECIFIED", -"ANACONDA", -"DOCKER", -"DRUID", -"FLINK", -"HBASE", -"HIVE_WEBHCAT", -"HUDI", -"JUPYTER", -"PRESTO", -"TRINO", -"RANGER", -"SOLR", -"ZEPPELIN", -"ZOOKEEPER" -], -"enumDescriptions": [ -"Unspecified component. 
Specifying this will cause Cluster creation to fail.", -"The Anaconda component is no longer supported or applicable to supported Dataproc on Compute Engine image versions (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-version-clusters#supported-dataproc-image-versions). It cannot be activated on clusters created with supported Dataproc on Compute Engine image versions.", -"Docker", -"The Druid query engine. (alpha)", -"Flink", -"HBase. (beta)", -"The Hive Web HCatalog (the REST service for accessing HCatalog).", -"Hudi.", -"The Jupyter Notebook.", -"The Presto query engine.", -"The Trino query engine.", -"The Ranger service.", -"The Solr service.", -"The Zeppelin notebook.", -"The Zookeeper service." -], +"memSize": { +"format": "int64", "type": "string" }, -"type": "array" +"name": { +"type": "string" }, -"properties": { -"additionalProperties": { +"storageLevel": { "type": "string" }, -"description": "Optional. The properties to set on daemon config files.Property keys are specified in prefix:property format, for example core:hadoop.tmp.dir. The following are supported prefixes and their mappings: capacity-scheduler: capacity-scheduler.xml core: core-site.xml distcp: distcp-default.xml hdfs: hdfs-site.xml hive: hive-site.xml mapred: mapred-site.xml pig: pig.properties spark: spark-defaults.conf yarn: yarn-site.xmlFor more information, see Cluster properties (https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", -"type": "object" +"useDisk": { +"type": "boolean" +}, +"useMemory": { +"type": "boolean" } }, "type": "object" }, -"SparkBatch": { -"description": "A configuration for running an Apache Spark (https://spark.apache.org/) batch workload.", -"id": "SparkBatch", +"StreamingQueryData": { +"description": "Streaming", +"id": "StreamingQueryData", "properties": { -"archiveUris": { -"description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. 
Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", -"items": { +"endTimestamp": { +"format": "int64", "type": "string" }, -"type": "array" -}, -"args": { -"description": "Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.", -"items": { +"exception": { "type": "string" }, -"type": "array" +"isActive": { +"type": "boolean" }, -"fileUris": { -"description": "Optional. HCFS URIs of files to be placed in the working directory of each executor.", -"items": { +"name": { "type": "string" }, -"type": "array" -}, -"jarFileUris": { -"description": "Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.", -"items": { +"runId": { "type": "string" }, -"type": "array" -}, -"mainClass": { -"description": "Optional. The name of the driver main class. The jar file that contains the class must be in the classpath or specified in jar_file_uris.", +"startTimestamp": { +"format": "int64", "type": "string" }, -"mainJarFileUri": { -"description": "Optional. The HCFS URI of the jar file that contains the main class.", +"streamingQueryId": { "type": "string" } }, "type": "object" }, -"SparkConnectConfig": { -"description": "Spark connect configuration for an interactive session.", -"id": "SparkConnectConfig", -"properties": {}, -"type": "object" -}, -"SparkHistoryServerConfig": { -"description": "Spark History Server configuration for the workload.", -"id": "SparkHistoryServerConfig", +"StreamingQueryProgress": { +"id": "StreamingQueryProgress", "properties": { -"dataprocCluster": { -"description": "Optional. 
Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload.Example: projects/[project_id]/regions/[region]/clusters/[cluster_name]", +"batchDuration": { +"format": "int64", +"type": "string" +}, +"batchId": { +"format": "int64", +"type": "string" +}, +"durationMillis": { +"additionalProperties": { +"format": "int64", "type": "string" -} }, "type": "object" }, -"SparkJob": { -"description": "A Dataproc job for running Apache Spark (https://spark.apache.org/) applications on YARN.", -"id": "SparkJob", -"properties": { -"archiveUris": { -"description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", -"items": { +"eventTime": { +"additionalProperties": { "type": "string" }, -"type": "array" +"type": "object" }, -"args": { -"description": "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", -"items": { +"name": { "type": "string" }, -"type": "array" +"observedMetrics": { +"additionalProperties": { +"type": "string" }, -"fileUris": { -"description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", -"items": { +"type": "object" +}, +"runId": { "type": "string" }, -"type": "array" +"sink": { +"$ref": "SinkProgress" }, -"jarFileUris": { -"description": "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.", +"sources": { "items": { -"type": "string" +"$ref": "SourceProgress" }, "type": "array" }, -"loggingConfig": { -"$ref": "LoggingConfig", -"description": "Optional. The runtime log config for job execution." +"stateOperators": { +"items": { +"$ref": "StateOperatorProgress" }, -"mainClass": { -"description": "The name of the driver's main class. 
The jar file that contains the class must be in the default CLASSPATH or specified in SparkJob.jar_file_uris.", -"type": "string" +"type": "array" }, -"mainJarFileUri": { -"description": "The HCFS URI of the jar file that contains the main class.", +"streamingQueryProgressId": { "type": "string" }, -"properties": { -"additionalProperties": { +"timestamp": { "type": "string" -}, -"description": "Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", -"type": "object" } }, "type": "object" }, -"SparkRBatch": { -"description": "A configuration for running an Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) batch workload.", -"id": "SparkRBatch", +"SubmitJobRequest": { +"description": "A request to submit a job.", +"id": "SubmitJobRequest", "properties": { -"archiveUris": { -"description": "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", -"items": { -"type": "string" -}, -"type": "array" +"job": { +"$ref": "Job", +"description": "Required. The job resource." }, -"args": { -"description": "Optional. The arguments to pass to the Spark driver. Do not include arguments that can be set as batch properties, such as --conf, since a collision can occur that causes an incorrect batch submission.", -"items": { +"requestId": { +"description": "Optional. A unique id used to identify the request. 
If the server receives two SubmitJobRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s with the same id, then the second request will be ignored and the first Job created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", "type": "string" +} }, -"type": "array" +"type": "object" }, -"fileUris": { -"description": "Optional. HCFS URIs of files to be placed in the working directory of each executor.", -"items": { +"SummarizeSessionSparkApplicationExecutorsResponse": { +"description": "Consolidated summary of executors for a Spark Application.", +"id": "SummarizeSessionSparkApplicationExecutorsResponse", +"properties": { +"activeExecutorSummary": { +"$ref": "ConsolidatedExecutorSummary", +"description": "Consolidated summary for active executors." +}, +"applicationId": { +"description": "Spark Application Id", "type": "string" }, -"type": "array" +"deadExecutorSummary": { +"$ref": "ConsolidatedExecutorSummary", +"description": "Consolidated summary for dead executors." }, -"mainRFileUri": { -"description": "Required. The HCFS URI of the main R file to use as the driver. Must be a .R or .r file.", -"type": "string" +"totalExecutorSummary": { +"$ref": "ConsolidatedExecutorSummary", +"description": "Overall consolidated summary for all executors." } }, "type": "object" }, -"SparkRJob": { -"description": "A Dataproc job for running Apache SparkR (https://spark.apache.org/docs/latest/sparkr.html) applications on YARN.", -"id": "SparkRJob", +"SummarizeSessionSparkApplicationJobsResponse": { +"description": "Summary of a Spark Application jobs.", +"id": "SummarizeSessionSparkApplicationJobsResponse", "properties": { -"archiveUris": { -"description": "Optional. 
HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", -"items": { -"type": "string" -}, -"type": "array" -}, -"args": { -"description": "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", -"items": { -"type": "string" +"jobsSummary": { +"$ref": "JobsSummary", +"description": "Summary of a Spark Application Jobs" +} }, -"type": "array" +"type": "object" }, -"fileUris": { -"description": "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", -"items": { -"type": "string" +"SummarizeSessionSparkApplicationStageAttemptTasksResponse": { +"description": "Summary of tasks for a Spark Application stage attempt.", +"id": "SummarizeSessionSparkApplicationStageAttemptTasksResponse", +"properties": { +"stageAttemptTasksSummary": { +"$ref": "StageAttemptTasksSummary", +"description": "Summary of tasks for a Spark Application Stage Attempt" +} }, -"type": "array" +"type": "object" }, -"loggingConfig": { -"$ref": "LoggingConfig", -"description": "Optional. The runtime log config for job execution." +"SummarizeSessionSparkApplicationStagesResponse": { +"description": "Summary of a Spark Application stages.", +"id": "SummarizeSessionSparkApplicationStagesResponse", +"properties": { +"stagesSummary": { +"$ref": "StagesSummary", +"description": "Summary of a Spark Application Stages" +} }, -"mainRFileUri": { -"description": "Required. The HCFS URI of the main R file to use as the driver. 
Must be a .R file.", -"type": "string" +"type": "object" }, +"SummarizeSparkApplicationExecutorsResponse": { +"description": "Consolidated summary of executors for a Spark Application.", +"id": "SummarizeSparkApplicationExecutorsResponse", "properties": { -"additionalProperties": { +"activeExecutorSummary": { +"$ref": "ConsolidatedExecutorSummary", +"description": "Consolidated summary for active executors." +}, +"applicationId": { +"description": "Spark Application Id", "type": "string" }, -"description": "Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API might be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", -"type": "object" +"deadExecutorSummary": { +"$ref": "ConsolidatedExecutorSummary", +"description": "Consolidated summary for dead executors." +}, +"totalExecutorSummary": { +"$ref": "ConsolidatedExecutorSummary", +"description": "Overall consolidated summary for all executors." } }, "type": "object" }, -"SparkSqlBatch": { -"description": "A configuration for running Apache Spark SQL (https://spark.apache.org/sql/) queries as a batch workload.", -"id": "SparkSqlBatch", +"SummarizeSparkApplicationJobsResponse": { +"description": "Summary of a Spark Application jobs.", +"id": "SummarizeSparkApplicationJobsResponse", "properties": { -"jarFileUris": { -"description": "Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.", -"items": { -"type": "string" -}, -"type": "array" +"jobsSummary": { +"$ref": "JobsSummary", +"description": "Summary of a Spark Application Jobs" +} }, -"queryFileUri": { -"description": "Required. 
The HCFS URI of the script that contains Spark SQL queries to execute.", -"type": "string" +"type": "object" }, -"queryVariables": { -"additionalProperties": { -"type": "string" +"SummarizeSparkApplicationStageAttemptTasksResponse": { +"description": "Summary of tasks for a Spark Application stage attempt.", +"id": "SummarizeSparkApplicationStageAttemptTasksResponse", +"properties": { +"stageAttemptTasksSummary": { +"$ref": "StageAttemptTasksSummary", +"description": "Summary of tasks for a Spark Application Stage Attempt" +} }, -"description": "Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";).", "type": "object" +}, +"SummarizeSparkApplicationStagesResponse": { +"description": "Summary of a Spark Application stages.", +"id": "SummarizeSparkApplicationStagesResponse", +"properties": { +"stagesSummary": { +"$ref": "StagesSummary", +"description": "Summary of a Spark Application Stages" } }, "type": "object" }, -"SparkSqlJob": { -"description": "A Dataproc job for running Apache Spark SQL (https://spark.apache.org/sql/) queries.", -"id": "SparkSqlJob", +"TaskData": { +"description": "Data corresponding to tasks created by spark.", +"id": "TaskData", "properties": { -"jarFileUris": { -"description": "Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.", +"accumulatorUpdates": { "items": { -"type": "string" +"$ref": "AccumulableInfo" }, "type": "array" }, -"loggingConfig": { -"$ref": "LoggingConfig", -"description": "Optional. The runtime log config for job execution." +"attempt": { +"format": "int32", +"type": "integer" }, -"properties": { +"durationMillis": { +"format": "int64", +"type": "string" +}, +"errorMessage": { +"type": "string" +}, +"executorId": { +"type": "string" +}, +"executorLogs": { "additionalProperties": { "type": "string" }, -"description": "Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. 
Properties that conflict with values set by the Dataproc API might be overwritten.", "type": "object" }, -"queryFileUri": { -"description": "The HCFS URI of the script that contains SQL queries.", +"gettingResultTimeMillis": { +"format": "int64", "type": "string" }, -"queryList": { -"$ref": "QueryList", -"description": "A list of queries." +"hasMetrics": { +"type": "boolean" }, -"scriptVariables": { -"additionalProperties": { +"host": { "type": "string" }, -"description": "Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";).", -"type": "object" -} +"index": { +"format": "int32", +"type": "integer" }, -"type": "object" +"launchTime": { +"format": "google-datetime", +"type": "string" }, -"SparkStandaloneAutoscalingConfig": { -"description": "Basic autoscaling configurations for Spark Standalone.", -"id": "SparkStandaloneAutoscalingConfig", -"properties": { -"gracefulDecommissionTimeout": { -"description": "Required. Timeout for Spark graceful decommissioning of spark workers. Specifies the duration to wait for spark worker to complete spark decommissioning tasks before forcefully removing workers. Only applicable to downscaling operations.Bounds: 0s, 1d.", -"format": "google-duration", +"partitionId": { +"format": "int32", +"type": "integer" +}, +"resultFetchStart": { +"format": "google-datetime", "type": "string" }, -"removeOnlyIdleWorkers": { -"description": "Optional. Remove only idle workers when scaling down cluster", +"schedulerDelayMillis": { +"format": "int64", +"type": "string" +}, +"speculative": { "type": "boolean" }, -"scaleDownFactor": { -"description": "Required. Fraction of required executors to remove from Spark Serverless clusters. A scale-down factor of 1.0 will result in scaling down so that there are no more executors for the Spark Job.(more aggressive scaling). 
A scale-down factor closer to 0 will result in a smaller magnitude of scaling donw (less aggressive scaling).Bounds: 0.0, 1.0.", -"format": "double", -"type": "number" +"stageAttemptId": { +"format": "int32", +"type": "integer" }, -"scaleDownMinWorkerFraction": { -"description": "Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.", -"format": "double", -"type": "number" +"stageId": { +"format": "int64", +"type": "string" }, -"scaleUpFactor": { -"description": "Required. Fraction of required workers to add to Spark Standalone clusters. A scale-up factor of 1.0 will result in scaling up so that there are no more required workers for the Spark Job (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.", -"format": "double", -"type": "number" +"status": { +"type": "string" }, -"scaleUpMinWorkerFraction": { -"description": "Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change.Bounds: 0.0, 1.0. 
Default: 0.0.", -"format": "double", -"type": "number" +"taskId": { +"format": "int64", +"type": "string" +}, +"taskLocality": { +"type": "string" +}, +"taskMetrics": { +"$ref": "TaskMetrics" } }, "type": "object" }, -"StartClusterRequest": { -"description": "A request to start a cluster.", -"id": "StartClusterRequest", +"TaskMetrics": { +"description": "Executor Task Metrics", +"id": "TaskMetrics", "properties": { -"clusterUuid": { -"description": "Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist.", +"diskBytesSpilled": { +"format": "int64", "type": "string" }, -"requestId": { -"description": "Optional. A unique ID used to identify the request. If the server receives two StartClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"executorCpuTimeNanos": { +"format": "int64", "type": "string" -} }, -"type": "object" +"executorDeserializeCpuTimeNanos": { +"format": "int64", +"type": "string" }, -"StartupConfig": { -"description": "Configuration to handle the startup of instances during cluster create and update process.", -"id": "StartupConfig", -"properties": { -"requiredRegistrationFraction": { -"description": "Optional. The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. 
The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled).", -"format": "double", -"type": "number" -} +"executorDeserializeTimeMillis": { +"format": "int64", +"type": "string" }, -"type": "object" +"executorRunTimeMillis": { +"format": "int64", +"type": "string" }, -"StateHistory": { -"description": "Historical state information.", -"id": "StateHistory", -"properties": { -"state": { -"description": "Output only. The state of the batch at this point in history.", -"enum": [ -"STATE_UNSPECIFIED", -"PENDING", -"RUNNING", -"CANCELLING", -"CANCELLED", -"SUCCEEDED", -"FAILED" -], -"enumDescriptions": [ -"The batch state is unknown.", -"The batch is created before running.", -"The batch is running.", -"The batch is cancelling.", -"The batch cancellation was successful.", -"The batch completed successfully.", -"The batch is no longer running due to an error." -], -"readOnly": true, +"inputMetrics": { +"$ref": "InputMetrics" +}, +"jvmGcTimeMillis": { +"format": "int64", "type": "string" }, -"stateMessage": { -"description": "Output only. Details about the state at this point in history.", -"readOnly": true, +"memoryBytesSpilled": { +"format": "int64", "type": "string" }, -"stateStartTime": { -"description": "Output only. 
The time when the batch entered the historical state.", -"format": "google-datetime", -"readOnly": true, +"outputMetrics": { +"$ref": "OutputMetrics" +}, +"peakExecutionMemoryBytes": { +"format": "int64", +"type": "string" +}, +"resultSerializationTimeMillis": { +"format": "int64", +"type": "string" +}, +"resultSize": { +"format": "int64", "type": "string" +}, +"shuffleReadMetrics": { +"$ref": "ShuffleReadMetrics" +}, +"shuffleWriteMetrics": { +"$ref": "ShuffleWriteMetrics" } }, "type": "object" }, -"Status": { -"description": "The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors).", -"id": "Status", +"TaskQuantileMetrics": { +"id": "TaskQuantileMetrics", "properties": { -"code": { -"description": "The status code, which should be an enum value of google.rpc.Code.", -"format": "int32", -"type": "integer" +"diskBytesSpilled": { +"$ref": "Quantiles" }, -"details": { -"description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", -"items": { -"additionalProperties": { -"description": "Properties of the object. Contains field @type with type URL.", -"type": "any" +"durationMillis": { +"$ref": "Quantiles" }, -"type": "object" +"executorCpuTimeNanos": { +"$ref": "Quantiles" }, -"type": "array" +"executorDeserializeCpuTimeNanos": { +"$ref": "Quantiles" }, -"message": { -"description": "A developer-facing error message, which should be in English. 
Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", -"type": "string" -} +"executorDeserializeTimeMillis": { +"$ref": "Quantiles" }, -"type": "object" +"executorRunTimeMillis": { +"$ref": "Quantiles" }, -"StopClusterRequest": { -"description": "A request to stop a cluster.", -"id": "StopClusterRequest", -"properties": { -"clusterUuid": { -"description": "Optional. Specifying the cluster_uuid means the RPC will fail (with error NOT_FOUND) if a cluster with the specified UUID does not exist.", -"type": "string" +"gettingResultTimeMillis": { +"$ref": "Quantiles" }, -"requestId": { -"description": "Optional. A unique ID used to identify the request. If the server receives two StopClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned.Recommendation: Set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). 
The maximum length is 40 characters.", -"type": "string" +"inputMetrics": { +"$ref": "InputQuantileMetrics" +}, +"jvmGcTimeMillis": { +"$ref": "Quantiles" +}, +"memoryBytesSpilled": { +"$ref": "Quantiles" +}, +"outputMetrics": { +"$ref": "OutputQuantileMetrics" +}, +"peakExecutionMemoryBytes": { +"$ref": "Quantiles" +}, +"resultSerializationTimeMillis": { +"$ref": "Quantiles" +}, +"resultSize": { +"$ref": "Quantiles" +}, +"schedulerDelayMillis": { +"$ref": "Quantiles" +}, +"shuffleReadMetrics": { +"$ref": "ShuffleReadQuantileMetrics" +}, +"shuffleWriteMetrics": { +"$ref": "ShuffleWriteQuantileMetrics" } }, "type": "object" }, -"SubmitJobRequest": { -"description": "A request to submit a job.", -"id": "SubmitJobRequest", +"TaskResourceRequest": { +"description": "Resources used per task created by the application.", +"id": "TaskResourceRequest", "properties": { -"job": { -"$ref": "Job", -"description": "Required. The job resource." +"amount": { +"format": "double", +"type": "number" }, -"requestId": { -"description": "Optional. A unique id used to identify the request. If the server receives two SubmitJobRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s with the same id, then the second request will be ignored and the first Job created and stored in the backend is returned.It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier).The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.", +"resourceName": { "type": "string" } }, @@ -7548,6 +12670,53 @@ }, "type": "object" }, +"WriteSessionSparkApplicationContextRequest": { +"description": "Write Spark Application data to internal storage systems", +"id": "WriteSessionSparkApplicationContextRequest", +"properties": { +"parent": { +"description": "Required. 
Parent (Batch) resource reference.", +"type": "string" +}, +"sparkWrapperObjects": { +"description": "Required. The batch of spark application context objects sent for ingestion.", +"items": { +"$ref": "SparkWrapperObject" +}, +"type": "array" +} +}, +"type": "object" +}, +"WriteSessionSparkApplicationContextResponse": { +"description": "Response returned as an acknowledgement of receipt of data.", +"id": "WriteSessionSparkApplicationContextResponse", +"properties": {}, +"type": "object" +}, +"WriteSparkApplicationContextRequest": { +"description": "Write Spark Application data to internal storage systems", +"id": "WriteSparkApplicationContextRequest", +"properties": { +"parent": { +"description": "Required. Parent (Batch) resource reference.", +"type": "string" +}, +"sparkWrapperObjects": { +"items": { +"$ref": "SparkWrapperObject" +}, +"type": "array" +} +}, +"type": "object" +}, +"WriteSparkApplicationContextResponse": { +"description": "Response returned as an acknowledgement of receipt of data.", +"id": "WriteSparkApplicationContextResponse", +"properties": {}, +"type": "object" +}, "YarnApplication": { "description": "A YARN application created by a job. Application information is a subset of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.Beta Feature: This report is available for testing purposes only. 
It may be changed before final release.", "id": "YarnApplication", diff --git a/googleapiclient/discovery_cache/documents/developerconnect.v1.json b/googleapiclient/discovery_cache/documents/developerconnect.v1.json index a7ce09f0163..7ca8ffc170b 100644 --- a/googleapiclient/discovery_cache/documents/developerconnect.v1.json +++ b/googleapiclient/discovery_cache/documents/developerconnect.v1.json @@ -20,6 +20,51 @@ "description": "Regional Endpoint", "endpointUrl": "https://developerconnect.us-central1.rep.googleapis.com/", "location": "us-central1" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://developerconnect.europe-west1.rep.googleapis.com/", +"location": "europe-west1" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://developerconnect.asia-east1.rep.googleapis.com/", +"location": "asia-east1" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://developerconnect.asia-east2.rep.googleapis.com/", +"location": "asia-east2" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://developerconnect.europe-west4.rep.googleapis.com/", +"location": "europe-west4" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://developerconnect.us-east4.rep.googleapis.com/", +"location": "us-east4" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://developerconnect.us-east5.rep.googleapis.com/", +"location": "us-east5" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://developerconnect.asia-southeast1.rep.googleapis.com/", +"location": "asia-southeast1" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://developerconnect.us-west1.rep.googleapis.com/", +"location": "us-west1" +}, +{ +"description": "Regional Endpoint", +"endpointUrl": "https://developerconnect.us-west2.rep.googleapis.com/", +"location": "us-west2" } ], "fullyEncodeReservedExpansion": true, @@ -847,7 +892,7 @@ } } }, -"revision": "20240919", +"revision": "20240926", "rootUrl": 
"https://developerconnect.googleapis.com/", "schemas": { "CancelOperationRequest": { diff --git a/googleapiclient/discovery_cache/documents/discoveryengine.v1.json b/googleapiclient/discovery_cache/documents/discoveryengine.v1.json index 547118637ba..10137f424d4 100644 --- a/googleapiclient/discovery_cache/documents/discoveryengine.v1.json +++ b/googleapiclient/discovery_cache/documents/discoveryengine.v1.json @@ -5629,15 +5629,15 @@ } } }, -"identity_mapping_stores": { +"identityMappingStores": { "resources": { "operations": { "methods": { "get": { "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/identity_mapping_stores/{identity_mapping_storesId}/operations/{operationsId}", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/identityMappingStores/{identityMappingStoresId}/operations/{operationsId}", "httpMethod": "GET", -"id": "discoveryengine.projects.locations.identity_mapping_stores.operations.get", +"id": "discoveryengine.projects.locations.identityMappingStores.operations.get", "parameterOrder": [ "name" ], @@ -5645,7 +5645,7 @@ "name": { "description": "The name of the operation resource.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/identity_mapping_stores/[^/]+/operations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/identityMappingStores/[^/]+/operations/[^/]+$", "required": true, "type": "string" } @@ -5660,9 +5660,9 @@ }, "list": { "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/identity_mapping_stores/{identity_mapping_storesId}/operations", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/identityMappingStores/{identityMappingStoresId}/operations", "httpMethod": "GET", -"id": "discoveryengine.projects.locations.identity_mapping_stores.operations.list", +"id": "discoveryengine.projects.locations.identityMappingStores.operations.list", "parameterOrder": [ "name" ], @@ -5675,7 +5675,7 @@ "name": { "description": "The name of the operation's parent resource.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/identity_mapping_stores/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/identityMappingStores/[^/]+$", "required": true, "type": "string" }, @@ -5986,7 +5986,7 @@ } } }, -"revision": "20240920", +"revision": "20240930", "rootUrl": "https://discoveryengine.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -8163,6 +8163,10 @@ "description": "The display name of the model.", "type": "string" }, +"errorMessage": { +"description": "Currently this is only populated if the model state is `INPUT_VALIDATION_FAILED`.", +"type": "string" +}, "metrics": { "additionalProperties": { "format": "double", @@ -8744,6 +8748,10 @@ }, "type": "array" }, +"disableAnalytics": { +"description": "Optional. Whether to disable analytics for searches performed on this engine.", +"type": "boolean" +}, "displayName": { "description": "Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.", "type": "string" @@ -11692,6 +11700,14 @@ "description": "Obfuscated Dasher customer ID.", "type": "string" }, +"superAdminEmailAddress": { +"description": "Optional. The super admin email address for the workspace that will be used for access token generation. 
For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, +"superAdminServiceAccount": { +"description": "Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, "type": { "description": "The Google Workspace data source.", "enum": [ @@ -12540,6 +12556,10 @@ "description": "The display name of the model.", "type": "string" }, +"errorMessage": { +"description": "Currently this is only populated if the model state is `INPUT_VALIDATION_FAILED`.", +"type": "string" +}, "metrics": { "additionalProperties": { "format": "double", @@ -12992,6 +13012,10 @@ }, "type": "array" }, +"disableAnalytics": { +"description": "Optional. Whether to disable analytics for searches performed on this engine.", +"type": "boolean" +}, "displayName": { "description": "Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.", "type": "string" @@ -15428,6 +15452,14 @@ "description": "Obfuscated Dasher customer ID.", "type": "string" }, +"superAdminEmailAddress": { +"description": "Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, +"superAdminServiceAccount": { +"description": "Optional. The super admin service account for the workspace that will be used for access token generation. 
For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, "type": { "description": "The Google Workspace data source.", "enum": [ @@ -15768,6 +15800,10 @@ "description": "The display name of the model.", "type": "string" }, +"errorMessage": { +"description": "Currently this is only populated if the model state is `INPUT_VALIDATION_FAILED`.", +"type": "string" +}, "metrics": { "additionalProperties": { "format": "double", @@ -16211,6 +16247,10 @@ }, "type": "array" }, +"disableAnalytics": { +"description": "Optional. Whether to disable analytics for searches performed on this engine.", +"type": "boolean" +}, "displayName": { "description": "Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.", "type": "string" @@ -17996,6 +18036,14 @@ "description": "Obfuscated Dasher customer ID.", "type": "string" }, +"superAdminEmailAddress": { +"description": "Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, +"superAdminServiceAccount": { +"description": "Optional. The super admin service account for the workspace that will be used for access token generation. 
For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, "type": { "description": "The Google Workspace data source.", "enum": [ diff --git a/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json b/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json index 3d4d7443383..bd9316cdbd7 100644 --- a/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json @@ -6709,15 +6709,15 @@ } } }, -"identity_mapping_stores": { +"identityMappingStores": { "resources": { "operations": { "methods": { "get": { "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", -"flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/identity_mapping_stores/{identity_mapping_storesId}/operations/{operationsId}", +"flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/identityMappingStores/{identityMappingStoresId}/operations/{operationsId}", "httpMethod": "GET", -"id": "discoveryengine.projects.locations.identity_mapping_stores.operations.get", +"id": "discoveryengine.projects.locations.identityMappingStores.operations.get", "parameterOrder": [ "name" ], @@ -6725,7 +6725,7 @@ "name": { "description": "The name of the operation resource.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/identity_mapping_stores/[^/]+/operations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/identityMappingStores/[^/]+/operations/[^/]+$", "required": true, "type": "string" } @@ -6740,9 +6740,9 @@ }, "list": { "description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`.", -"flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/identity_mapping_stores/{identity_mapping_storesId}/operations", +"flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/identityMappingStores/{identityMappingStoresId}/operations", "httpMethod": "GET", -"id": "discoveryengine.projects.locations.identity_mapping_stores.operations.list", +"id": "discoveryengine.projects.locations.identityMappingStores.operations.list", "parameterOrder": [ "name" ], @@ -6755,7 +6755,7 @@ "name": { "description": "The name of the operation's parent resource.", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/identity_mapping_stores/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/identityMappingStores/[^/]+$", "required": true, "type": "string" }, @@ -7517,7 +7517,7 @@ } } }, -"revision": "20240920", +"revision": "20240930", "rootUrl": "https://discoveryengine.googleapis.com/", "schemas": { "GoogleApiDistribution": { @@ -8531,6 +8531,10 @@ }, "type": "array" }, +"disableAnalytics": { +"description": "Optional. Whether to disable analytics for searches performed on this engine.", +"type": "boolean" +}, "displayName": { "description": "Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.", "type": "string" @@ -9342,6 +9346,14 @@ "description": "Obfuscated Dasher customer ID.", "type": "string" }, +"superAdminEmailAddress": { +"description": "Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, +"superAdminServiceAccount": { +"description": "Optional. The super admin service account for the workspace that will be used for access token generation. 
For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, "type": { "description": "The Google Workspace data source.", "enum": [ @@ -9790,6 +9802,10 @@ "format": "int32", "type": "integer" }, +"naturalLanguageQueryUnderstandingSpec": { +"$ref": "GoogleCloudDiscoveryengineV1alphaSearchRequestNaturalLanguageQueryUnderstandingSpec", +"description": "Optional. Specification to enable natural language understanding capabilities for search requests." +}, "orderBy": { "description": "The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned.", "type": "string" @@ -11529,6 +11545,10 @@ "description": "The display name of the model.", "type": "string" }, +"errorMessage": { +"description": "Currently this is only populated if the model state is `INPUT_VALIDATION_FAILED`.", +"type": "string" +}, "metrics": { "additionalProperties": { "format": "double", @@ -12189,6 +12209,10 @@ }, "type": "array" }, +"disableAnalytics": { +"description": "Optional. Whether to disable analytics for searches performed on this engine.", +"type": "boolean" +}, "displayName": { "description": "Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.", "type": "string" @@ -17120,6 +17144,14 @@ "description": "Obfuscated Dasher customer ID.", "type": "string" }, +"superAdminEmailAddress": { +"description": "Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, +"superAdminServiceAccount": { +"description": "Optional. 
The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, "type": { "description": "The Google Workspace data source.", "enum": [ @@ -17460,6 +17492,10 @@ "description": "The display name of the model.", "type": "string" }, +"errorMessage": { +"description": "Currently this is only populated if the model state is `INPUT_VALIDATION_FAILED`.", +"type": "string" +}, "metrics": { "additionalProperties": { "format": "double", @@ -17903,6 +17939,10 @@ }, "type": "array" }, +"disableAnalytics": { +"description": "Optional. Whether to disable analytics for searches performed on this engine.", +"type": "boolean" +}, "displayName": { "description": "Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.", "type": "string" @@ -19688,6 +19728,14 @@ "description": "Obfuscated Dasher customer ID.", "type": "string" }, +"superAdminEmailAddress": { +"description": "Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, +"superAdminServiceAccount": { +"description": "Optional. The super admin service account for the workspace that will be used for access token generation. 
For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, "type": { "description": "The Google Workspace data source.", "enum": [ diff --git a/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json b/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json index 61ab778843c..d66a1fa2ba1 100644 --- a/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json +++ b/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json @@ -6158,6 +6158,80 @@ } } }, +"identityMappingStores": { +"resources": { +"operations": { +"methods": { +"get": { +"description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", +"flatPath": "v1beta/projects/{projectsId}/locations/{locationsId}/identityMappingStores/{identityMappingStoresId}/operations/{operationsId}", +"httpMethod": "GET", +"id": "discoveryengine.projects.locations.identityMappingStores.operations.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/identityMappingStores/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta/{+name}", +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`.", +"flatPath": "v1beta/projects/{projectsId}/locations/{locationsId}/identityMappingStores/{identityMappingStoresId}/operations", +"httpMethod": "GET", +"id": "discoveryengine.projects.locations.identityMappingStores.operations.list", +"parameterOrder": [ +"name" +], +"parameters": { +"filter": { +"description": "The standard list filter.", +"location": "query", +"type": "string" +}, +"name": { +"description": "The name of the operation's parent resource.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/identityMappingStores/[^/]+$", +"required": true, +"type": "string" +}, +"pageSize": { +"description": "The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "The standard list page token.", +"location": "query", +"type": "string" +} +}, +"path": "v1beta/{+name}/operations", +"response": { +"$ref": "GoogleLongrunningListOperationsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +} +} +}, "operations": { "methods": { "get": { @@ -6786,7 +6860,7 @@ } } }, -"revision": "20240920", +"revision": "20240930", "rootUrl": "https://discoveryengine.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -7614,6 +7688,10 @@ }, "type": "array" }, +"disableAnalytics": { +"description": "Optional. Whether to disable analytics for searches performed on this engine.", +"type": "boolean" +}, "displayName": { "description": "Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.", "type": "string" @@ -8425,6 +8503,14 @@ "description": "Obfuscated Dasher customer ID.", "type": "string" }, +"superAdminEmailAddress": { +"description": "Optional. The super admin email address for the workspace that will be used for access token generation. 
For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, +"superAdminServiceAccount": { +"description": "Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, "type": { "description": "The Google Workspace data source.", "enum": [ @@ -9273,6 +9359,10 @@ "description": "The display name of the model.", "type": "string" }, +"errorMessage": { +"description": "Currently this is only populated if the model state is `INPUT_VALIDATION_FAILED`.", +"type": "string" +}, "metrics": { "additionalProperties": { "format": "double", @@ -9725,6 +9815,10 @@ }, "type": "array" }, +"disableAnalytics": { +"description": "Optional. Whether to disable analytics for searches performed on this engine.", +"type": "boolean" +}, "displayName": { "description": "Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.", "type": "string" @@ -12161,6 +12255,14 @@ "description": "Obfuscated Dasher customer ID.", "type": "string" }, +"superAdminEmailAddress": { +"description": "Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, +"superAdminServiceAccount": { +"description": "Optional. The super admin service account for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, "type": { "description": "The Google Workspace data source.", "enum": [ @@ -12590,6 +12692,10 @@ "format": "int32", "type": "integer" }, +"naturalLanguageQueryUnderstandingSpec": { +"$ref": "GoogleCloudDiscoveryengineV1betaSearchRequestNaturalLanguageQueryUnderstandingSpec", +"description": "Optional. 
Specification to enable natural language understanding capabilities for search requests." +}, "orderBy": { "description": "The order in which documents are returned. Documents can be ordered by a field in an Document object. Leave it unset if ordered by relevance. `order_by` expression is case-sensitive. For more information on ordering, see [Ordering](https://cloud.google.com/retail/docs/filter-and-order#order) If this field is unrecognizable, an `INVALID_ARGUMENT` is returned.", "type": "string" @@ -14229,6 +14335,10 @@ "description": "The display name of the model.", "type": "string" }, +"errorMessage": { +"description": "Currently this is only populated if the model state is `INPUT_VALIDATION_FAILED`.", +"type": "string" +}, "metrics": { "additionalProperties": { "format": "double", @@ -14844,6 +14954,10 @@ }, "type": "array" }, +"disableAnalytics": { +"description": "Optional. Whether to disable analytics for searches performed on this engine.", +"type": "boolean" +}, "displayName": { "description": "Required. The display name of the engine. Should be human readable. UTF-8 encoded string with limit of 1024 characters.", "type": "string" @@ -18889,6 +19003,14 @@ "description": "Obfuscated Dasher customer ID.", "type": "string" }, +"superAdminEmailAddress": { +"description": "Optional. The super admin email address for the workspace that will be used for access token generation. For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, +"superAdminServiceAccount": { +"description": "Optional. The super admin service account for the workspace that will be used for access token generation. 
For now we only use it for Native Google Drive connector data ingestion.", +"type": "string" +}, "type": { "description": "The Google Workspace data source.", "enum": [ diff --git a/googleapiclient/discovery_cache/documents/displayvideo.v2.json b/googleapiclient/discovery_cache/documents/displayvideo.v2.json index 8d2d69c7d19..62b3f59affc 100644 --- a/googleapiclient/discovery_cache/documents/displayvideo.v2.json +++ b/googleapiclient/discovery_cache/documents/displayvideo.v2.json @@ -9267,7 +9267,7 @@ } } }, -"revision": "20240919", +"revision": "20241003", "rootUrl": "https://displayvideo.googleapis.com/", "schemas": { "ActivateManualTriggerRequest": { @@ -20066,8 +20066,8 @@ false "Content related to motor vehicle, aviation or other transportation accidents.", "Issues that evoke strong, opposing views and spark debate. These include issues that are controversial in most countries and markets (such as abortion), as well as those that are controversial in specific countries and markets (such as immigration reform in the United States).", "Content which may be considered shocking or disturbing, such as violent news stories, stunts, or toilet humor.", -"YouTube videos embedded on websites outside of YouTube.com. Only applicable to YouTube and Partners line items. *Warning*: On **September 30, 2024**, this value will be sunset. [Read more about this announced change](/display-video/api/deprecations#features.yt_li_categories).", -"Video of live events streamed over the internet. Only applicable to YouTube and Partners line items. *Warning*: On **September 30, 2024**, this value will be sunset. [Read more about this announced change](/display-video/api/deprecations#features.yt_li_categories)." +"YouTube videos embedded on websites outside of YouTube.com.", +"Video of live events streamed over the internet." 
], "type": "string" } @@ -20121,8 +20121,8 @@ false "Content related to motor vehicle, aviation or other transportation accidents.", "Issues that evoke strong, opposing views and spark debate. These include issues that are controversial in most countries and markets (such as abortion), as well as those that are controversial in specific countries and markets (such as immigration reform in the United States).", "Content which may be considered shocking or disturbing, such as violent news stories, stunts, or toilet humor.", -"YouTube videos embedded on websites outside of YouTube.com. Only applicable to YouTube and Partners line items. *Warning*: On **September 30, 2024**, this value will be sunset. [Read more about this announced change](/display-video/api/deprecations#features.yt_li_categories).", -"Video of live events streamed over the internet. Only applicable to YouTube and Partners line items. *Warning*: On **September 30, 2024**, this value will be sunset. [Read more about this announced change](/display-video/api/deprecations#features.yt_li_categories)." +"YouTube videos embedded on websites outside of YouTube.com.", +"Video of live events streamed over the internet." ], "readOnly": true, "type": "string" diff --git a/googleapiclient/discovery_cache/documents/displayvideo.v3.json b/googleapiclient/discovery_cache/documents/displayvideo.v3.json index ad76695de9d..aa7ddbfb06e 100644 --- a/googleapiclient/discovery_cache/documents/displayvideo.v3.json +++ b/googleapiclient/discovery_cache/documents/displayvideo.v3.json @@ -9222,7 +9222,7 @@ } } }, -"revision": "20240919", +"revision": "20241003", "rootUrl": "https://displayvideo.googleapis.com/", "schemas": { "ActiveViewVideoViewabilityMetricConfig": { @@ -20837,8 +20837,8 @@ false "Content related to motor vehicle, aviation or other transportation accidents.", "Issues that evoke strong, opposing views and spark debate. 
These include issues that are controversial in most countries and markets (such as abortion), as well as those that are controversial in specific countries and markets (such as immigration reform in the United States).", "Content which may be considered shocking or disturbing, such as violent news stories, stunts, or toilet humor.", -"YouTube videos embedded on websites outside of YouTube.com. Only applicable to YouTube and Partners line items. *Warning*: On **September 30, 2024**, this value will be sunset. [Read more about this announced change](/display-video/api/deprecations#features.yt_li_categories).", -"Video of live events streamed over the internet. Only applicable to YouTube and Partners line items. *Warning*: On **September 30, 2024**, this value will be sunset. [Read more about this announced change](/display-video/api/deprecations#features.yt_li_categories)." +"YouTube videos embedded on websites outside of YouTube.com.", +"Video of live events streamed over the internet." ], "type": "string" } @@ -20892,8 +20892,8 @@ false "Content related to motor vehicle, aviation or other transportation accidents.", "Issues that evoke strong, opposing views and spark debate. These include issues that are controversial in most countries and markets (such as abortion), as well as those that are controversial in specific countries and markets (such as immigration reform in the United States).", "Content which may be considered shocking or disturbing, such as violent news stories, stunts, or toilet humor.", -"YouTube videos embedded on websites outside of YouTube.com. Only applicable to YouTube and Partners line items. *Warning*: On **September 30, 2024**, this value will be sunset. [Read more about this announced change](/display-video/api/deprecations#features.yt_li_categories).", -"Video of live events streamed over the internet. Only applicable to YouTube and Partners line items. *Warning*: On **September 30, 2024**, this value will be sunset. 
[Read more about this announced change](/display-video/api/deprecations#features.yt_li_categories)." +"YouTube videos embedded on websites outside of YouTube.com.", +"Video of live events streamed over the internet." ], "readOnly": true, "type": "string" @@ -21054,7 +21054,7 @@ false "type": "boolean" }, "enableOptimizedTargeting": { -"description": "Required. Whether to enable Optimized Targeting for the line item. *Warning*: Starting on **September 30, 2024**, optimized targeting will no longer be compatible with a subset of bid strategies. [Read more about this announced change](/display-video/api/deprecations#features.ot_bid_strategies).", +"description": "Required. Whether to enable Optimized Targeting for the line item. Optimized targeting is not compatible with all bid strategies. Attempting to set this field to `true` for a line item using one of the following combinations of BiddingStrategy fields and BiddingStrategyPerformanceGoalType will result in an error: maximize_auto_spend_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_CIVA` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_IVO_TEN` * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_AV_VIEWED` performance_goal_auto_bid: * `BIDDING_STRATEGY_PERFORMANCE_GOAL_TYPE_VIEWABLE_CPM`", "type": "boolean" } }, diff --git a/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json b/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json index a7802ac399b..04276aa1c0f 100644 --- a/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json @@ -4,9 +4,6 @@ "scopes": { "https://www.googleapis.com/auth/factchecktools": { "description": "Read, create, update, and delete your ClaimReview data." 
-}, -"https://www.googleapis.com/auth/userinfo.email": { -"description": "See your primary Google Account email address" } } } @@ -220,8 +217,7 @@ "$ref": "GoogleFactcheckingFactchecktoolsV1alpha1ClaimReviewMarkupPage" }, "scopes": [ -"https://www.googleapis.com/auth/factchecktools", -"https://www.googleapis.com/auth/userinfo.email" +"https://www.googleapis.com/auth/factchecktools" ] }, "delete": { @@ -246,8 +242,7 @@ "$ref": "GoogleProtobufEmpty" }, "scopes": [ -"https://www.googleapis.com/auth/factchecktools", -"https://www.googleapis.com/auth/userinfo.email" +"https://www.googleapis.com/auth/factchecktools" ] }, "get": { @@ -272,8 +267,7 @@ "$ref": "GoogleFactcheckingFactchecktoolsV1alpha1ClaimReviewMarkupPage" }, "scopes": [ -"https://www.googleapis.com/auth/factchecktools", -"https://www.googleapis.com/auth/userinfo.email" +"https://www.googleapis.com/auth/factchecktools" ] }, "list": { @@ -316,8 +310,7 @@ "$ref": "GoogleFactcheckingFactchecktoolsV1alpha1ListClaimReviewMarkupPagesResponse" }, "scopes": [ -"https://www.googleapis.com/auth/factchecktools", -"https://www.googleapis.com/auth/userinfo.email" +"https://www.googleapis.com/auth/factchecktools" ] }, "update": { @@ -345,14 +338,13 @@ "$ref": "GoogleFactcheckingFactchecktoolsV1alpha1ClaimReviewMarkupPage" }, "scopes": [ -"https://www.googleapis.com/auth/factchecktools", -"https://www.googleapis.com/auth/userinfo.email" +"https://www.googleapis.com/auth/factchecktools" ] } } } }, -"revision": "20240707", +"revision": "20240929", "rootUrl": "https://factchecktools.googleapis.com/", "schemas": { "GoogleFactcheckingFactchecktoolsV1alpha1Claim": { diff --git a/googleapiclient/discovery_cache/documents/firebaseappcheck.v1.json b/googleapiclient/discovery_cache/documents/firebaseappcheck.v1.json index 559c88885ef..a11345df90b 100644 --- a/googleapiclient/discovery_cache/documents/firebaseappcheck.v1.json +++ b/googleapiclient/discovery_cache/documents/firebaseappcheck.v1.json @@ -1370,7 +1370,7 @@ ], 
"parameters": { "name": { -"description": "Required. The relative resource name of the Service to retrieve, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore)", +"description": "Required. The relative resource name of the Service to retrieve, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `oauth2.googleapis.com` (Google Identity for iOS)", "location": "path", "pattern": "^projects/[^/]+/services/[^/]+$", "required": true, @@ -1433,7 +1433,7 @@ ], "parameters": { "name": { -"description": "Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore)", +"description": "Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. 
Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `oauth2.googleapis.com` (Google Identity for iOS)", "location": "path", "pattern": "^projects/[^/]+/services/[^/]+$", "required": true, @@ -1661,7 +1661,7 @@ } } }, -"revision": "20240708", +"revision": "20240930", "rootUrl": "https://firebaseappcheck.googleapis.com/", "schemas": { "GoogleFirebaseAppcheckV1AppAttestConfig": { @@ -1681,11 +1681,11 @@ "type": "object" }, "GoogleFirebaseAppcheckV1AppCheckToken": { -"description": "Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check.", +"description": "Encapsulates an *App Check token*, which are used to access backend services protected by App Check.", "id": "GoogleFirebaseAppcheckV1AppCheckToken", "properties": { "token": { -"description": "The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK.", +"description": "The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. 
These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries.", "type": "string" }, "ttl": { @@ -2277,7 +2277,7 @@ "type": "object" }, "GoogleFirebaseAppcheckV1ResourcePolicy": { -"description": "App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration.", +"description": "App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration.", "id": "GoogleFirebaseAppcheckV1ResourcePolicy", "properties": { "enforcementMode": { @@ -2303,7 +2303,7 @@ "type": "string" }, "targetResource": { -"description": "Required. Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created.", +"description": "Required. 
Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created.", "type": "string" }, "updateTime": { @@ -2351,7 +2351,7 @@ "type": "string" }, "name": { -"description": "Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore)", +"description": "Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `oauth2.googleapis.com` (Google Identity for iOS)", "type": "string" } }, @@ -2379,7 +2379,7 @@ "properties": { "service": { "$ref": "GoogleFirebaseAppcheckV1Service", -"description": "Required. The Service to update. 
The Service's `name` field is used to identify the Service to be updated, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore)" +"description": "Required. The Service to update. The Service's `name` field is used to identify the Service to be updated, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `oauth2.googleapis.com` (Google Identity for iOS)" }, "updateMask": { "description": "Required. A comma-separated list of names of fields in the Service to update. Example: `enforcement_mode`.", diff --git a/googleapiclient/discovery_cache/documents/firebaseappcheck.v1beta.json b/googleapiclient/discovery_cache/documents/firebaseappcheck.v1beta.json index 8010edfb7c3..5af296d0c75 100644 --- a/googleapiclient/discovery_cache/documents/firebaseappcheck.v1beta.json +++ b/googleapiclient/discovery_cache/documents/firebaseappcheck.v1beta.json @@ -1595,7 +1595,7 @@ ], "parameters": { "name": { -"description": "Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. 
Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform)", +"description": "Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform) * `oauth2.googleapis.com` (Google Identity for iOS)", "location": "path", "pattern": "^projects/[^/]+/services/[^/]+$", "required": true, @@ -1823,7 +1823,7 @@ } } }, -"revision": "20240708", +"revision": "20240930", "rootUrl": "https://firebaseappcheck.googleapis.com/", "schemas": { "GoogleFirebaseAppcheckV1betaAppAttestConfig": { @@ -1843,16 +1843,16 @@ "type": "object" }, "GoogleFirebaseAppcheckV1betaAppCheckToken": { -"description": "Encapsulates an *App Check token*, which are used to access Firebase services protected by App Check.", +"description": "Encapsulates an *App Check token*, which are used to access backend services protected by App Check.", "id": "GoogleFirebaseAppcheckV1betaAppCheckToken", "properties": { "attestationToken": { "deprecated": true, -"description": "An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check.", +"description": "The App Check token. 
App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries.", "type": "string" }, "token": { -"description": "An App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and Firebase project. This token is used to access Firebase services protected by App Check.", +"description": "The App Check token. App Check tokens are signed [JWTs](https://tools.ietf.org/html/rfc7519) containing claims that identify the attested app and GCP project. This token is used to access Google services protected by App Check. These tokens can also be [verified by your own custom backends](https://firebase.google.com/docs/app-check/custom-resource-backend) using the Firebase Admin SDK or third-party libraries.", "type": "string" }, "ttl": { @@ -2519,7 +2519,7 @@ "type": "object" }, "GoogleFirebaseAppcheckV1betaResourcePolicy": { -"description": "App Check enforcement policy for a specific resource of a Firebase service supported by App Check. Note that this policy will override the service-level configuration.", +"description": "App Check enforcement policy for a specific resource of a Google service supported by App Check. Note that this policy will override the service-level configuration.", "id": "GoogleFirebaseAppcheckV1betaResourcePolicy", "properties": { "enforcementMode": { @@ -2545,7 +2545,7 @@ "type": "string" }, "targetResource": { -"description": "Required. 
Service specific name of the resource object to which this policy applies, in the format: * `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` (Google Identity for iOS) Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created.", +"description": "Required. Service specific name of the resource object to which this policy applies, in the format: * **iOS OAuth clients** (Google Identity for iOS): `//oauth2.googleapis.com/projects/{project_number}/oauthClients/{oauth_client_id}` Note that the resource must belong to the service specified in the `name` and be from the same project as this policy, but the resource is allowed to be missing at the time of creation of this policy; in that case, we make a best-effort attempt at respecting this policy, but it may not have any effect until the resource is fully created.", "type": "string" }, "updateTime": { @@ -2597,7 +2597,7 @@ "type": "string" }, "name": { -"description": "Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform)", +"description": "Required. The relative resource name of the service configuration object, in the format: ``` projects/{project_number}/services/{service_id} ``` Note that the `service_id` element must be a supported service ID. 
Currently, the following service IDs are supported: * `firebasestorage.googleapis.com` (Cloud Storage for Firebase) * `firebasedatabase.googleapis.com` (Firebase Realtime Database) * `firestore.googleapis.com` (Cloud Firestore) * `identitytoolkit.googleapis.com` (Firebase Authentication with Identity Platform) * `oauth2.googleapis.com` (Google Identity for iOS)", "type": "string" }, "updateTime": { diff --git a/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1alpha.json b/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1alpha.json index 4f97f19f1d1..fad47250ad3 100644 --- a/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1alpha.json @@ -448,6 +448,31 @@ "resources": { "tests": { "methods": { +"cancel": { +"description": "Abort automated test run on release.", +"flatPath": "v1alpha/projects/{projectsId}/apps/{appsId}/releases/{releasesId}/tests/{testsId}:cancel", +"httpMethod": "GET", +"id": "firebaseappdistribution.projects.apps.releases.tests.cancel", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the release test resource. Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}/tests/{test_id}`", +"location": "path", +"pattern": "^projects/[^/]+/apps/[^/]+/releases/[^/]+/tests/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1alpha/{+name}:cancel", +"response": { +"$ref": "GoogleFirebaseAppdistroV1alphaCancelReleaseTestResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, "create": { "description": "Run automated test(s) on release.", "flatPath": "v1alpha/projects/{projectsId}/apps/{appsId}/releases/{releasesId}/tests", @@ -465,7 +490,7 @@ "type": "string" }, "releaseTestId": { -"description": "Optional. The ID to use for the test, which will become the final component of the tests's resource name. 
This value should be 4-63 characters, and valid characters are /a-z-/. If it is not provided one will be automatically generated.", +"description": "Optional. The ID to use for the test, which will become the final component of the test's resource name. This value should be 4-63 characters, and valid characters are /a-z-/. If it is not provided one will be automatically generated.", "location": "query", "type": "string" } @@ -585,7 +610,7 @@ } } }, -"revision": "20240911", +"revision": "20241004", "rootUrl": "https://firebaseappdistribution.googleapis.com/", "schemas": { "AndroidxCrawlerOutputPoint": { @@ -874,6 +899,12 @@ }, "type": "object" }, +"GoogleFirebaseAppdistroV1alphaCancelReleaseTestResponse": { +"description": "The (empty) response message for `CancelReleaseTest`.", +"id": "GoogleFirebaseAppdistroV1alphaCancelReleaseTestResponse", +"properties": {}, +"type": "object" +}, "GoogleFirebaseAppdistroV1alphaCreateReleaseNotesRequest": { "id": "GoogleFirebaseAppdistroV1alphaCreateReleaseNotesRequest", "properties": { @@ -1440,6 +1471,25 @@ "name": { "description": "The name of the release test resource. Format: `projects/{project_number}/apps/{app_id}/releases/{release_id}/tests/{test_id}`", "type": "string" +}, +"testState": { +"description": "Output only. The state of the release test.", +"enum": [ +"TEST_STATE_UNSPECIFIED", +"IN_PROGRESS", +"PASSED", +"FAILED", +"INCONCLUSIVE" +], +"enumDescriptions": [ +"Test state unspecified.", +"The test is in progress.", +"The test has passed.", +"The test has failed.", +"The test was inconclusive." 
+], +"readOnly": true, +"type": "string" } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/firebaseml.v2beta.json b/googleapiclient/discovery_cache/documents/firebaseml.v2beta.json index 9aa8b29402d..3573a390404 100644 --- a/googleapiclient/discovery_cache/documents/firebaseml.v2beta.json +++ b/googleapiclient/discovery_cache/documents/firebaseml.v2beta.json @@ -206,7 +206,7 @@ } } }, -"revision": "20240925", +"revision": "20241003", "rootUrl": "https://firebaseml.googleapis.com/", "schemas": { "Date": { @@ -738,6 +738,10 @@ "description": "Generation config.", "id": "GoogleCloudAiplatformV1beta1GenerationConfig", "properties": { +"audioTimestamp": { +"description": "Optional. If enabled, audio timestamp will be included in the request to the model.", +"type": "boolean" +}, "candidateCount": { "description": "Optional. Number of candidates to generate.", "format": "int32", @@ -1097,7 +1101,7 @@ "id": "GoogleCloudAiplatformV1beta1RetrievalMetadata", "properties": { "googleSearchDynamicRetrievalScore": { -"description": "Optional. Score indicating how likely information from google search could help answer the prompt. The score is in the range [0, 1], where 0 is the least likely and 1 is the most likely. This score is only populated when google search grounding and dynamic retrieval is enabled. It will be compared to the threshold to determine whether to trigger google search.", +"description": "Optional. Score indicating how likely information from Google Search could help answer the prompt. The score is in the range `[0, 1]`, where 0 is the least likely and 1 is the most likely. This score is only populated when Google Search grounding and dynamic retrieval is enabled. It will be compared to the threshold to determine whether to trigger Google Search.", "format": "float", "type": "number" } @@ -1268,7 +1272,7 @@ "type": "string" }, "enum": { -"description": "Optional. Possible values of the element of Type.STRING with enum format. 
For example we can define an Enum Direction as : {type:STRING, format:enum, enum:[\"EAST\", NORTH\", \"SOUTH\", \"WEST\"]}", +"description": "Optional. Possible values of the element of primitive type with enum format. Examples: 1. We can define direction as : {type:STRING, format:enum, enum:[\"EAST\", NORTH\", \"SOUTH\", \"WEST\"]} 2. We can define apartment number as : {type:INTEGER, format:enum, enum:[\"101\", \"201\", \"301\"]}", "items": { "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/language.v1.json b/googleapiclient/discovery_cache/documents/language.v1.json index fa7212842f2..f2c11327b04 100644 --- a/googleapiclient/discovery_cache/documents/language.v1.json +++ b/googleapiclient/discovery_cache/documents/language.v1.json @@ -246,7 +246,7 @@ } } }, -"revision": "20240623", +"revision": "20240929", "rootUrl": "https://language.googleapis.com/", "schemas": { "AnalyzeEntitiesRequest": { @@ -756,7 +756,11 @@ "A2_ULTRAGPU_2G", "A2_ULTRAGPU_4G", "A2_ULTRAGPU_8G", +"A3_HIGHGPU_1G", +"A3_HIGHGPU_2G", +"A3_HIGHGPU_4G", "A3_HIGHGPU_8G", +"A3_MEGAGPU_8G", "E2_STANDARD_2", "E2_STANDARD_4", "E2_STANDARD_8", @@ -1042,6 +1046,10 @@ "", "", "", +"", +"", +"", +"", "" ], "type": "string" @@ -1473,7 +1481,11 @@ "A2_ULTRAGPU_2G", "A2_ULTRAGPU_4G", "A2_ULTRAGPU_8G", +"A3_HIGHGPU_1G", +"A3_HIGHGPU_2G", +"A3_HIGHGPU_4G", "A3_HIGHGPU_8G", +"A3_MEGAGPU_8G", "E2_STANDARD_2", "E2_STANDARD_4", "E2_STANDARD_8", @@ -1759,6 +1771,10 @@ "", "", "", +"", +"", +"", +"", "" ], "type": "string" @@ -2149,7 +2165,11 @@ "A2_ULTRAGPU_2G", "A2_ULTRAGPU_4G", "A2_ULTRAGPU_8G", +"A3_HIGHGPU_1G", +"A3_HIGHGPU_2G", +"A3_HIGHGPU_4G", "A3_HIGHGPU_8G", +"A3_MEGAGPU_8G", "E2_STANDARD_2", "E2_STANDARD_4", "E2_STANDARD_8", @@ -2435,6 +2455,10 @@ "", "", "", +"", +"", +"", +"", "" ], "type": "string" @@ -3616,6 +3640,7 @@ false "NVIDIA_A100_80GB", "NVIDIA_L4", "NVIDIA_H100_80GB", +"NVIDIA_H100_MEGA_80GB", "TPU_V2", "TPU_V3", "TPU_V4_POD", @@ -3632,6 +3657,7 @@ false "Nvidia A100 
80GB GPU.", "Nvidia L4 GPU.", "Nvidia H100 80Gb GPU.", +"Nvidia H100 80Gb GPU.", "TPU v2 (JellyFish).", "TPU v3 (DragonFish).", "TPU_v4 (PufferFish).", diff --git a/googleapiclient/discovery_cache/documents/language.v1beta2.json b/googleapiclient/discovery_cache/documents/language.v1beta2.json index deff06dd8df..660d3c2724a 100644 --- a/googleapiclient/discovery_cache/documents/language.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/language.v1beta2.json @@ -246,7 +246,7 @@ } } }, -"revision": "20240623", +"revision": "20240929", "rootUrl": "https://language.googleapis.com/", "schemas": { "AnalyzeEntitiesRequest": { @@ -756,7 +756,11 @@ "A2_ULTRAGPU_2G", "A2_ULTRAGPU_4G", "A2_ULTRAGPU_8G", +"A3_HIGHGPU_1G", +"A3_HIGHGPU_2G", +"A3_HIGHGPU_4G", "A3_HIGHGPU_8G", +"A3_MEGAGPU_8G", "E2_STANDARD_2", "E2_STANDARD_4", "E2_STANDARD_8", @@ -1042,6 +1046,10 @@ "", "", "", +"", +"", +"", +"", "" ], "type": "string" @@ -1491,7 +1499,11 @@ "A2_ULTRAGPU_2G", "A2_ULTRAGPU_4G", "A2_ULTRAGPU_8G", +"A3_HIGHGPU_1G", +"A3_HIGHGPU_2G", +"A3_HIGHGPU_4G", "A3_HIGHGPU_8G", +"A3_MEGAGPU_8G", "E2_STANDARD_2", "E2_STANDARD_4", "E2_STANDARD_8", @@ -1777,6 +1789,10 @@ "", "", "", +"", +"", +"", +"", "" ], "type": "string" @@ -2167,7 +2183,11 @@ "A2_ULTRAGPU_2G", "A2_ULTRAGPU_4G", "A2_ULTRAGPU_8G", +"A3_HIGHGPU_1G", +"A3_HIGHGPU_2G", +"A3_HIGHGPU_4G", "A3_HIGHGPU_8G", +"A3_MEGAGPU_8G", "E2_STANDARD_2", "E2_STANDARD_4", "E2_STANDARD_8", @@ -2453,6 +2473,10 @@ "", "", "", +"", +"", +"", +"", "" ], "type": "string" @@ -3634,6 +3658,7 @@ false "NVIDIA_A100_80GB", "NVIDIA_L4", "NVIDIA_H100_80GB", +"NVIDIA_H100_MEGA_80GB", "TPU_V2", "TPU_V3", "TPU_V4_POD", @@ -3650,6 +3675,7 @@ false "Nvidia A100 80GB GPU.", "Nvidia L4 GPU.", "Nvidia H100 80Gb GPU.", +"Nvidia H100 80Gb GPU.", "TPU v2 (JellyFish).", "TPU v3 (DragonFish).", "TPU_v4 (PufferFish).", diff --git a/googleapiclient/discovery_cache/documents/language.v2.json b/googleapiclient/discovery_cache/documents/language.v2.json index 
a4cec0fd65b..06271fffea8 100644 --- a/googleapiclient/discovery_cache/documents/language.v2.json +++ b/googleapiclient/discovery_cache/documents/language.v2.json @@ -208,7 +208,7 @@ } } }, -"revision": "20240713", +"revision": "20240929", "rootUrl": "https://language.googleapis.com/", "schemas": { "AnalyzeEntitiesRequest": { @@ -426,6 +426,11 @@ "name": { "description": "The name of the category representing the document.", "type": "string" +}, +"severity": { +"description": "Optional. The classifier's severity of the category. This is only present when the ModerateTextRequest.ModelVersion is set to MODEL_VERSION_2, and the corresponding category has a severity score.", +"format": "float", +"type": "number" } }, "type": "object" @@ -576,6 +581,9 @@ "A2_ULTRAGPU_2G", "A2_ULTRAGPU_4G", "A2_ULTRAGPU_8G", +"A3_HIGHGPU_1G", +"A3_HIGHGPU_2G", +"A3_HIGHGPU_4G", "A3_HIGHGPU_8G", "A3_MEGAGPU_8G", "E2_STANDARD_2", @@ -864,6 +872,9 @@ "", "", "", +"", +"", +"", "" ], "type": "string" @@ -1109,6 +1120,9 @@ "A2_ULTRAGPU_2G", "A2_ULTRAGPU_4G", "A2_ULTRAGPU_8G", +"A3_HIGHGPU_1G", +"A3_HIGHGPU_2G", +"A3_HIGHGPU_4G", "A3_HIGHGPU_8G", "A3_MEGAGPU_8G", "E2_STANDARD_2", @@ -1397,6 +1411,9 @@ "", "", "", +"", +"", +"", "" ], "type": "string" @@ -1460,6 +1477,20 @@ "document": { "$ref": "Document", "description": "Required. Input document." +}, +"modelVersion": { +"description": "Optional. The model version to use for ModerateText.", +"enum": [ +"MODEL_VERSION_UNSPECIFIED", +"MODEL_VERSION_1", +"MODEL_VERSION_2" +], +"enumDescriptions": [ +"The default model version.", +"Use the v1 model, this model is used by default when not provided. The v1 model only returns probability (confidence) score for each category.", +"Use the v2 model. The v2 model only returns probability (confidence) score for each category, and returns severity score for a subset of the categories." 
+], +"type": "string" } }, "type": "object" @@ -1528,6 +1559,9 @@ "A2_ULTRAGPU_2G", "A2_ULTRAGPU_4G", "A2_ULTRAGPU_8G", +"A3_HIGHGPU_1G", +"A3_HIGHGPU_2G", +"A3_HIGHGPU_4G", "A3_HIGHGPU_8G", "A3_MEGAGPU_8G", "E2_STANDARD_2", @@ -1816,6 +1850,9 @@ "", "", "", +"", +"", +"", "" ], "type": "string" diff --git a/googleapiclient/discovery_cache/documents/merchantapi.accounts_v1beta.json b/googleapiclient/discovery_cache/documents/merchantapi.accounts_v1beta.json index 2ebbd00b030..ef1ec5b76d0 100644 --- a/googleapiclient/discovery_cache/documents/merchantapi.accounts_v1beta.json +++ b/googleapiclient/discovery_cache/documents/merchantapi.accounts_v1beta.json @@ -126,7 +126,7 @@ ] }, "delete": { -"description": "Deletes the specified account regardless of its type: standalone, MCA or sub-account. Deleting an MCA leads to the deletion of all of its sub-accounts. Executing this method requires admin access.", +"description": "Deletes the specified account regardless of its type: standalone, MCA or sub-account. Deleting an MCA leads to the deletion of all of its sub-accounts. Executing this method requires admin access. The deletion succeeds only if the account does not provide services to any other account and has no processed offers. You can use the `force` parameter to override this.", "flatPath": "accounts/v1beta/accounts/{accountsId}", "httpMethod": "DELETE", "id": "merchantapi.accounts.delete", @@ -134,6 +134,11 @@ "name" ], "parameters": { +"force": { +"description": "Optional. If set to `true`, the account is deleted even if it provides services to other accounts or has processed offers.", +"location": "query", +"type": "boolean" +}, "name": { "description": "Required. The name of the account to delete. Format: `accounts/{account}`", "location": "path", @@ -176,7 +181,7 @@ ] }, "list": { -"description": "Lists accounts accessible to the calling user and matching the constraints of the request such as page size or filters. 
This is not just listing the sub-accounts of an MCA, but all accounts the calling user has access to including other MCAs, linked accounts, standalone accounts and so on.", +"description": "Lists accounts accessible to the calling user and matching the constraints of the request such as page size or filters. This is not just listing the sub-accounts of an MCA, but all accounts the calling user has access to including other MCAs, linked accounts, standalone accounts and so on. If no filter is provided, then it returns accounts the user is directly added to.", "flatPath": "accounts/v1beta/accounts", "httpMethod": "GET", "id": "merchantapi.accounts.list", @@ -1412,24 +1417,9 @@ } } }, -"revision": "20240924", +"revision": "20241004", "rootUrl": "https://merchantapi.googleapis.com/", "schemas": { -"AcceptTermsOfService": { -"description": "Reference to a Terms of Service resource.", -"id": "AcceptTermsOfService", -"properties": { -"name": { -"description": "Required. The resource name of the terms of service version in the format `termsOfService/{version}`. To retrieve the latest version, use the [termsOfService.retrieveLatest](/merchant/api/reference/rest/accounts_v1beta/termsOfService/retrieveLatest) method.", -"type": "string" -}, -"regionCode": { -"description": "Required. Region code as defined by [CLDR](https://cldr.unicode.org/). This is either a country when the ToS applies specifically to that country or `001` when it applies globally.", -"type": "string" -} -}, -"type": "object" -}, "Accepted": { "description": "Describes the accepted terms of service.", "id": "Accepted", @@ -1764,10 +1754,6 @@ "description": "Request message for the `CreateAndConfigureAccount` method.", "id": "CreateAndConfigureAccountRequest", "properties": { -"acceptTermsOfService": { -"$ref": "AcceptTermsOfService", -"description": "Optional. The Terms of Service (ToS) to be accepted immediately upon account creation." -}, "account": { "$ref": "Account", "description": "Required. 
The account to be created." diff --git a/googleapiclient/discovery_cache/documents/merchantapi.conversions_v1beta.json b/googleapiclient/discovery_cache/documents/merchantapi.conversions_v1beta.json index de1a9d99569..b7b2ac205c1 100644 --- a/googleapiclient/discovery_cache/documents/merchantapi.conversions_v1beta.json +++ b/googleapiclient/discovery_cache/documents/merchantapi.conversions_v1beta.json @@ -172,7 +172,7 @@ ], "parameters": { "name": { -"description": "Required. The name of the conversion source to be fetched. Format: accounts/{account}/conversionsources/{conversion_source}", +"description": "Required. The name of the conversion source to be fetched. Format: accounts/{account}/conversionSources/{conversion_source}", "location": "path", "pattern": "^accounts/[^/]+/conversionSources/[^/]+$", "required": true, @@ -295,7 +295,7 @@ } } }, -"revision": "20240827", +"revision": "20241001", "rootUrl": "https://merchantapi.googleapis.com/", "schemas": { "AttributionSettings": { diff --git a/googleapiclient/discovery_cache/documents/merchantapi.notifications_v1beta.json b/googleapiclient/discovery_cache/documents/merchantapi.notifications_v1beta.json index e08f8ce0f04..45378a5f7e5 100644 --- a/googleapiclient/discovery_cache/documents/merchantapi.notifications_v1beta.json +++ b/googleapiclient/discovery_cache/documents/merchantapi.notifications_v1beta.json @@ -110,7 +110,7 @@ "notificationsubscriptions": { "methods": { "create": { -"description": "Creates a notification subscription for a merchant. We will allow the following types of notification subscriptions to exist together (per merchant as a subscriber per event type): 1. Subscription for all managed accounts + subscription for self 2. Multiple \"partial\" subscriptions for managed accounts + subscription for self we will not allow (per merchant as a subscriber per event type): 1. multiple self subscriptions. 2. multiple \"all managed accounts\" subscriptions. 3. 
all and partial subscriptions at the same time. 4. multiple partial subscriptions for the same target account", +"description": "Creates a notification subscription for a business. For standalone or subaccounts accounts, the business can create a subscription for self. For MCAs, the business can create a subscription for all managed accounts or for a specific subaccount. We will allow the following types of notification subscriptions to exist together (per business as a subscriber per event type): 1. Subscription for all managed accounts + subscription for self. 2. Multiple \"partial\" subscriptions for managed accounts + subscription for self. we will not allow (per business as a subscriber per event type): 1. Multiple self subscriptions. 2. Multiple \"all managed accounts\" subscriptions. 3. \"All managed accounts\" subscription and partial subscriptions at the same time. 4. Multiple partial subscriptions for the same target account.", "flatPath": "notifications/v1beta/accounts/{accountsId}/notificationsubscriptions", "httpMethod": "POST", "id": "merchantapi.accounts.notificationsubscriptions.create", @@ -262,7 +262,7 @@ } } }, -"revision": "20240827", +"revision": "20240930", "rootUrl": "https://merchantapi.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/merchantapi.products_v1beta.json b/googleapiclient/discovery_cache/documents/merchantapi.products_v1beta.json index 274e8cecc19..1aa53df3809 100644 --- a/googleapiclient/discovery_cache/documents/merchantapi.products_v1beta.json +++ b/googleapiclient/discovery_cache/documents/merchantapi.products_v1beta.json @@ -211,7 +211,7 @@ ], "parameters": { "pageSize": { -"description": "The maximum number of products to return. The service may return fewer than this value. The maximum value is 1000; values above 1000 will be coerced to 1000. If unspecified, the maximum number of products will be returned.", +"description": "The maximum number of products to return. 
The service may return fewer than this value. The maximum value is 250; values above 250 will be coerced to 250. If unspecified, the maximum number of products will be returned.", "format": "int32", "location": "query", "type": "integer" @@ -242,7 +242,7 @@ } } }, -"revision": "20240916", +"revision": "20241001", "rootUrl": "https://merchantapi.googleapis.com/", "schemas": { "Attributes": { @@ -1183,7 +1183,7 @@ false "type": "string" }, "name": { -"description": "The name of the product. Format: `\"{product.name=accounts/{account}/products/{product}}\"`", +"description": "The name of the product. Format: `\"{product.name=accounts/{account}/products/{product}}\"` where the last section `product` consists of 4 parts: channel~content_language~feed_label~offer_id example for product name is \"accounts/123/products/online~en~US~sku123\"", "type": "string" }, "offerId": { @@ -1358,7 +1358,7 @@ false "type": "string" }, "name": { -"description": "Identifier. The name of the product input. Format: `\"{productinput.name=accounts/{account}/productInputs/{productinput}}\"`", +"description": "Identifier. The name of the product input. 
Format: `\"{productinput.name=accounts/{account}/productInputs/{productinput}}\"` where the last section `productinput` consists of 4 parts: channel~content_language~feed_label~offer_id example for product input name is \"accounts/123/productInputs/online~en~US~sku123\"", "type": "string" }, "offerId": { diff --git a/googleapiclient/discovery_cache/documents/migrationcenter.v1.json b/googleapiclient/discovery_cache/documents/migrationcenter.v1.json index f420c7c5335..b1e551cf26d 100644 --- a/googleapiclient/discovery_cache/documents/migrationcenter.v1.json +++ b/googleapiclient/discovery_cache/documents/migrationcenter.v1.json @@ -2309,7 +2309,7 @@ } } }, -"revision": "20240906", +"revision": "20240926", "rootUrl": "https://migrationcenter.googleapis.com/", "schemas": { "AddAssetsToGroupRequest": { @@ -4365,7 +4365,8 @@ "type": "integer" }, "cpuThreadCount": { -"description": "Number of CPU threads allocated to the machine.", +"deprecated": true, +"description": "Deprecated: use MachineDetails.core_count instead. Number of CPU threads allocated to the machine.", "format": "int32", "type": "integer" }, @@ -5199,7 +5200,7 @@ "properties": { "coreCountHistogram": { "$ref": "ReportSummaryHistogramChartData", -"description": "Histogram showing a distribution of CPU core counts." +"description": "Histogram showing a distribution of logical CPU core counts." 
}, "memoryBytesHistogram": { "$ref": "ReportSummaryHistogramChartData", diff --git a/googleapiclient/discovery_cache/documents/migrationcenter.v1alpha1.json b/googleapiclient/discovery_cache/documents/migrationcenter.v1alpha1.json index 7cd88da73d6..75349df2414 100644 --- a/googleapiclient/discovery_cache/documents/migrationcenter.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/migrationcenter.v1alpha1.json @@ -2548,7 +2548,7 @@ } } }, -"revision": "20240919", +"revision": "20240926", "rootUrl": "https://migrationcenter.googleapis.com/", "schemas": { "AddAssetsToGroupRequest": { @@ -4149,7 +4149,7 @@ "type": "string" }, "edition": { -"description": "Optional. Cloud SQL edition. For SQL Server, only Enterprise is available.", +"description": "Optional. Preferred Cloud SQL edition.", "enum": [ "CLOUD_SQL_EDITION_UNSPECIFIED", "CLOUD_SQL_EDITION_ENTERPRISE", @@ -7073,7 +7073,7 @@ false }, "coreCountHistogram": { "$ref": "ReportSummaryHistogramChartData", -"description": "Histogram showing a distribution of CPU core counts." +"description": "Histogram showing a distribution of logical CPU core counts." }, "databaseTypes": { "$ref": "ReportSummaryChartData", @@ -8277,7 +8277,8 @@ false "type": "integer" }, "cpuThreadCount": { -"description": "Number of CPU threads allocated to the machine.", +"deprecated": true, +"description": "Deprecated: use VirtualMachineDetails.core_count instead. 
Number of CPU threads allocated to the machine.", "format": "int32", "type": "integer" }, diff --git a/googleapiclient/discovery_cache/documents/monitoring.v1.json b/googleapiclient/discovery_cache/documents/monitoring.v1.json index 50b85409e38..7e3d45d2268 100644 --- a/googleapiclient/discovery_cache/documents/monitoring.v1.json +++ b/googleapiclient/discovery_cache/documents/monitoring.v1.json @@ -753,7 +753,7 @@ } } }, -"revision": "20240829", +"revision": "20240929", "rootUrl": "https://monitoring.googleapis.com/", "schemas": { "Aggregation": { @@ -1413,7 +1413,8 @@ "CLOUD_SQL_STORAGE", "UPTIME_CHECK_FAILURE", "CLOUD_ALERTING_ALERT", -"SERVICE_HEALTH_INCIDENT" +"SERVICE_HEALTH_INCIDENT", +"SAP_BACKINT" ], "enumDescriptions": [ "No event type specified.", @@ -1437,7 +1438,8 @@ "Storage event for a Cloud SQL instance.", "Failure of a Cloud Monitoring uptime check.", "Alerts from Cloud Alerting", -"Incidents from Service Health" +"Incidents from Service Health", +"Agent for SAP Backint related events." ], "type": "string" }, @@ -2054,7 +2056,7 @@ "type": "string" }, "query": { -"description": "A PromQL query string. Query lanauge documentation: https://prometheus.io/docs/prometheus/latest/querying/basics/.", +"description": "A PromQL query string. Query language documentation: https://prometheus.io/docs/prometheus/latest/querying/basics/.", "type": "string" }, "start": { @@ -2069,7 +2071,7 @@ "id": "QueryInstantRequest", "properties": { "query": { -"description": "A PromQL query string. Query lanauge documentation: https://prometheus.io/docs/prometheus/latest/querying/basics/.", +"description": "A PromQL query string. Query language documentation: https://prometheus.io/docs/prometheus/latest/querying/basics/.", "type": "string" }, "time": { @@ -2111,7 +2113,7 @@ "type": "string" }, "query": { -"description": "A PromQL query string. Query lanauge documentation: https://prometheus.io/docs/prometheus/latest/querying/basics/.", +"description": "A PromQL query string. 
Query language documentation: https://prometheus.io/docs/prometheus/latest/querying/basics/.", "type": "string" }, "start": { diff --git a/googleapiclient/discovery_cache/documents/monitoring.v3.json b/googleapiclient/discovery_cache/documents/monitoring.v3.json index eb12abcd802..a3a84061f0e 100644 --- a/googleapiclient/discovery_cache/documents/monitoring.v3.json +++ b/googleapiclient/discovery_cache/documents/monitoring.v3.json @@ -2715,7 +2715,7 @@ } } }, -"revision": "20240829", +"revision": "20240929", "rootUrl": "https://monitoring.googleapis.com/", "schemas": { "Aggregation": { @@ -3020,6 +3020,17 @@ }, "type": "object" }, +"BooleanTest": { +"description": "A test that uses an alerting result in a boolean column produced by the SQL query.", +"id": "BooleanTest", +"properties": { +"column": { +"description": "Required. The name of the column containing the boolean value. If the value in a row is NULL, that row is ignored.", +"type": "string" +} +}, +"type": "object" +}, "BucketOptions": { "description": "BucketOptions describes the bucket boundaries used to create a histogram for the distribution. The buckets can be in a linear sequence, an exponential sequence, or each bucket can be specified explicitly. BucketOptions does not include the number of values in each bucket.A bucket has an inclusive lower bound and exclusive upper bound for the values that are counted for that bucket. The upper bound of a bucket must be strictly greater than the lower bound. The sequence of N buckets for a distribution consists of an underflow bucket (number 0), zero or more finite buckets (number 1 through N - 2) and an overflow bucket (number N - 1). The buckets are contiguous: the lower bound of bucket i (i > 0) is the same as the upper bound of bucket i - 1. The buckets span the whole range of finite values: lower bound of the underflow bucket is -infinity and the upper bound of the overflow bucket is +infinity. 
The finite buckets are so-called because both bounds are finite.", "id": "BucketOptions", @@ -3243,6 +3254,10 @@ "$ref": "PrometheusQueryLanguageCondition", "description": "A condition that uses the Prometheus query language to define alerts." }, +"conditionSql": { +"$ref": "SqlCondition", +"description": "A condition that uses SQL to define alerts in Logs Analytics." +}, "conditionThreshold": { "$ref": "MetricThreshold", "description": "A condition that compares a time series against a threshold." @@ -3393,6 +3408,22 @@ "properties": {}, "type": "object" }, +"Daily": { +"description": "Used to schedule the query to run every so many days.", +"id": "Daily", +"properties": { +"executionTime": { +"$ref": "TimeOfDay", +"description": "Optional. The time of day (in UTC) at which the query should run. If left unspecified, the server picks an arbitrary time of day and runs the query at the same time each day." +}, +"periodicity": { +"description": "Required. LINT.IfChange The number of days between runs. Must be greater than or equal to 1 day and less than or equal to 31 days. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc)", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "Distribution": { "description": "Distribution contains summary statistics for a population of values. It optionally contains a histogram representing the distribution of those values across a set of buckets.The summary statistics are the count, mean, sum of the squared deviation from the mean, the minimum, and the maximum of the set of population of values. The histogram is based on a sequence of buckets and gives a count of values that fall into each bucket. 
The boundaries of the buckets are given either explicitly or by formulas for buckets of fixed or exponentially increasing widths.Although it is not forbidden, it is generally a bad idea to include non-finite values (infinities or NaNs) in the population of values, as this will render the mean and sum_of_squared_deviation fields meaningless.", "id": "Distribution", @@ -3854,6 +3885,23 @@ }, "type": "object" }, +"Hourly": { +"description": "Used to schedule the query to run every so many hours.", +"id": "Hourly", +"properties": { +"minuteOffset": { +"description": "Optional. LINT.IfChange The number of minutes after the hour (in UTC) to run the query. Must be between 0 and 59 inclusive. If left unspecified, then an arbitrary offset is used. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc)", +"format": "int32", +"type": "integer" +}, +"periodicity": { +"description": "Required. LINT.IfChange The number of hours between runs. Must be greater than or equal to 1 hour and less than or equal to 48 hours. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc)", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "HttpCheck": { "description": "Information involved in an HTTP/HTTPS Uptime check request.", "id": "HttpCheck", @@ -4744,6 +4792,18 @@ }, "type": "object" }, +"Minutes": { +"description": "Used to schedule the query to run every so many minutes.", +"id": "Minutes", +"properties": { +"periodicity": { +"description": "Required. LINT.IfChange Number of minutes between runs. The interval must be between 5 minutes and 1440 minutes. LINT.ThenChange(//depot/google3/cloud/monitoring/api/alerts/policy_validation.cc)", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "MonitoredResource": { "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. 
Examples include virtual machine instances, databases, and storage devices such as disks. The type field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the labels field identifies the actual resource and its attributes according to the schema. For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for \"gce_instance\" has labels \"project_id\", \"instance_id\" and \"zone\": { \"type\": \"gce_instance\", \"labels\": { \"project_id\": \"my-project\", \"instance_id\": \"12345678901234\", \"zone\": \"us-central1-a\" }} ", "id": "MonitoredResource", @@ -5356,6 +5416,40 @@ }, "type": "object" }, +"RowCountTest": { +"description": "A test that checks if the number of rows in the result set violates some threshold.", +"id": "RowCountTest", +"properties": { +"comparison": { +"description": "Required. The comparison to apply between the number of rows returned by the query and the threshold.", +"enum": [ +"COMPARISON_UNSPECIFIED", +"COMPARISON_GT", +"COMPARISON_GE", +"COMPARISON_LT", +"COMPARISON_LE", +"COMPARISON_EQ", +"COMPARISON_NE" +], +"enumDescriptions": [ +"No ordering relationship is specified.", +"True if the left argument is greater than the right argument.", +"True if the left argument is greater than or equal to the right argument.", +"True if the left argument is less than the right argument.", +"True if the left argument is less than or equal to the right argument.", +"True if the left argument is equal to the right argument.", +"True if the left argument is not equal to the right argument." +], +"type": "string" +}, +"threshold": { +"description": "Required. 
The value against which to compare the row count.", +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, "SendNotificationChannelVerificationCodeRequest": { "description": "The SendNotificationChannelVerificationCode request.", "id": "SendNotificationChannelVerificationCodeRequest", @@ -5575,6 +5669,37 @@ }, "type": "object" }, +"SqlCondition": { +"description": "A condition that allows alert policies to be defined using GoogleSQL. SQL conditions examine a sliding window of logs using GoogleSQL. Alert policies with SQL conditions may incur additional billing.", +"id": "SqlCondition", +"properties": { +"booleanTest": { +"$ref": "BooleanTest", +"description": "Test the boolean value in the indicated column." +}, +"daily": { +"$ref": "Daily", +"description": "Schedule the query to execute every so many days." +}, +"hourly": { +"$ref": "Hourly", +"description": "Schedule the query to execute every so many hours." +}, +"minutes": { +"$ref": "Minutes", +"description": "Schedule the query to execute every so many minutes." +}, +"query": { +"description": "Required. The Log Analytics SQL query to run, as a string. The query must conform to the required shape. Specifically, the query must not try to filter the input by time. A filter will automatically be applied to filter the input so that the query receives all rows received since the last time the query was run.E.g. Extract all log entries containing an HTTP request:SELECT timestamp, log_name, severity, http_request, resource, labels FROM my-project.global._Default._AllLogs WHERE http_request IS NOT NULL", +"type": "string" +}, +"rowCountTest": { +"$ref": "RowCountTest", +"description": "Test the row count against a threshold." +} +}, +"type": "object" +}, "Status": { "description": "The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). 
Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors).", "id": "Status", @@ -5657,6 +5782,33 @@ }, "type": "object" }, +"TimeOfDay": { +"description": "Represents a time of day. The date and time zone are either not significant or are specified elsewhere. An API may choose to allow leap seconds. Related types are google.type.Date and google.protobuf.Timestamp.", +"id": "TimeOfDay", +"properties": { +"hours": { +"description": "Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", +"format": "int32", +"type": "integer" +}, +"minutes": { +"description": "Minutes of hour of day. Must be from 0 to 59.", +"format": "int32", +"type": "integer" +}, +"nanos": { +"description": "Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.", +"format": "int32", +"type": "integer" +}, +"seconds": { +"description": "Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "TimeSeries": { "description": "A collection of data points that describes the time-varying values of a metric. A time series is identified by a combination of a fully-specified monitored resource and a fully-specified metric. 
This type is used for both listing and creating time series.", "id": "TimeSeries", diff --git a/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json b/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json index ce212883df7..0971cca8c1c 100644 --- a/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json @@ -612,7 +612,7 @@ } } }, -"revision": "20240707", +"revision": "20241002", "rootUrl": "https://mybusinessbusinessinformation.googleapis.com/", "schemas": { "AdWordsLocationExtensions": { @@ -1696,22 +1696,22 @@ "id": "TimeOfDay", "properties": { "hours": { -"description": "Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", +"description": "Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", "format": "int32", "type": "integer" }, "minutes": { -"description": "Minutes of hour of day. Must be from 0 to 59.", +"description": "Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59.", "format": "int32", "type": "integer" }, "nanos": { -"description": "Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.", +"description": "Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999.", "format": "int32", "type": "integer" }, "seconds": { -"description": "Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.", +"description": "Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. 
An API may allow the value 60 if it allows leap-seconds.", "format": "int32", "type": "integer" } diff --git a/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json b/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json index c6cfe84267e..d2ada082af6 100644 --- a/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json @@ -194,7 +194,7 @@ } } }, -"revision": "20240707", +"revision": "20241002", "rootUrl": "https://mybusinesslodging.googleapis.com/", "schemas": { "Accessibility": { @@ -5215,22 +5215,22 @@ "id": "TimeOfDay", "properties": { "hours": { -"description": "Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", +"description": "Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", "format": "int32", "type": "integer" }, "minutes": { -"description": "Minutes of hour of day. Must be from 0 to 59.", +"description": "Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59.", "format": "int32", "type": "integer" }, "nanos": { -"description": "Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.", +"description": "Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999.", "format": "int32", "type": "integer" }, "seconds": { -"description": "Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.", +"description": "Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. 
An API may allow the value 60 if it allows leap-seconds.", "format": "int32", "type": "integer" } diff --git a/googleapiclient/discovery_cache/documents/osconfig.v1.json b/googleapiclient/discovery_cache/documents/osconfig.v1.json index 10c512d5b3a..bded9c61a76 100644 --- a/googleapiclient/discovery_cache/documents/osconfig.v1.json +++ b/googleapiclient/discovery_cache/documents/osconfig.v1.json @@ -1083,7 +1083,7 @@ } } }, -"revision": "20240901", +"revision": "20241002", "rootUrl": "https://osconfig.googleapis.com/", "schemas": { "AptSettings": { @@ -3500,22 +3500,22 @@ "id": "TimeOfDay", "properties": { "hours": { -"description": "Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", +"description": "Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", "format": "int32", "type": "integer" }, "minutes": { -"description": "Minutes of hour of day. Must be from 0 to 59.", +"description": "Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59.", "format": "int32", "type": "integer" }, "nanos": { -"description": "Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.", +"description": "Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999.", "format": "int32", "type": "integer" }, "seconds": { -"description": "Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.", +"description": "Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. 
An API may allow the value 60 if it allows leap-seconds.", "format": "int32", "type": "integer" } diff --git a/googleapiclient/discovery_cache/documents/osconfig.v1beta.json b/googleapiclient/discovery_cache/documents/osconfig.v1beta.json index 94f8cda6632..ada8ea2aa19 100644 --- a/googleapiclient/discovery_cache/documents/osconfig.v1beta.json +++ b/googleapiclient/discovery_cache/documents/osconfig.v1beta.json @@ -689,7 +689,7 @@ } } }, -"revision": "20240630", +"revision": "20241002", "rootUrl": "https://osconfig.googleapis.com/", "schemas": { "AptRepository": { @@ -2268,22 +2268,22 @@ "id": "TimeOfDay", "properties": { "hours": { -"description": "Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", +"description": "Hours of a day in 24 hour format. Must be greater than or equal to 0 and typically must be less than or equal to 23. An API may choose to allow the value \"24:00:00\" for scenarios like business closing time.", "format": "int32", "type": "integer" }, "minutes": { -"description": "Minutes of hour of day. Must be from 0 to 59.", +"description": "Minutes of an hour. Must be greater than or equal to 0 and less than or equal to 59.", "format": "int32", "type": "integer" }, "nanos": { -"description": "Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.", +"description": "Fractions of seconds, in nanoseconds. Must be greater than or equal to 0 and less than or equal to 999,999,999.", "format": "int32", "type": "integer" }, "seconds": { -"description": "Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.", +"description": "Seconds of a minute. Must be greater than or equal to 0 and typically must be less than or equal to 59. 
An API may allow the value 60 if it allows leap-seconds.", "format": "int32", "type": "integer" } diff --git a/googleapiclient/discovery_cache/documents/realtimebidding.v1.json b/googleapiclient/discovery_cache/documents/realtimebidding.v1.json index ef782f20318..64930e0d25e 100644 --- a/googleapiclient/discovery_cache/documents/realtimebidding.v1.json +++ b/googleapiclient/discovery_cache/documents/realtimebidding.v1.json @@ -1305,7 +1305,7 @@ } } }, -"revision": "20240806", +"revision": "20240930", "rootUrl": "https://realtimebidding.googleapis.com/", "schemas": { "ActivatePretargetingConfigRequest": { @@ -3251,11 +3251,11 @@ "readOnly": true }, "videoUrl": { -"description": "The URL to fetch a video ad.", +"description": "The URL to fetch a video ad. The URL should return an XML response that conforms to the VAST 2.0, 3.0 or 4.x standard.", "type": "string" }, "videoVastXml": { -"description": "The contents of a VAST document for a video ad. This document should conform to the VAST 2.0 or 3.0 standard.", +"description": "The contents of a VAST document for a video ad. This document should conform to the VAST 2.0, 3.0, or 4.x standard.", "type": "string" } }, diff --git a/googleapiclient/discovery_cache/documents/run.v1.json b/googleapiclient/discovery_cache/documents/run.v1.json index bbcb9fce6e6..f55391f40d6 100644 --- a/googleapiclient/discovery_cache/documents/run.v1.json +++ b/googleapiclient/discovery_cache/documents/run.v1.json @@ -2614,7 +2614,7 @@ } } }, -"revision": "20240916", +"revision": "20240927", "rootUrl": "https://run.googleapis.com/", "schemas": { "Addressable": { @@ -3801,11 +3801,13 @@ "description": "Optional. Option to specify how default logs buckets are setup.", "enum": [ "DEFAULT_LOGS_BUCKET_BEHAVIOR_UNSPECIFIED", -"REGIONAL_USER_OWNED_BUCKET" +"REGIONAL_USER_OWNED_BUCKET", +"LEGACY_BUCKET" ], "enumDescriptions": [ "Unspecified.", -"Bucket is located in user-owned project in the same region as the build. 
The builder service account must have access to create and write to Cloud Storage buckets in the build project." +"Bucket is located in user-owned project in the same region as the build. The builder service account must have access to create and write to Cloud Storage buckets in the build project.", +"Bucket is located in a Google-owned project and is not regionalized." ], "type": "string" }, @@ -5381,7 +5383,7 @@ false "additionalProperties": { "type": "string" }, -"description": "Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. 
* `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution.", +"description": "Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service. * `run.googleapis.com/build-id`: Service. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. 
* `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution.", "type": "object" }, "clusterName": { diff --git a/googleapiclient/discovery_cache/documents/run.v2.json b/googleapiclient/discovery_cache/documents/run.v2.json index 2879c924e38..afab4bcb0bc 100644 --- a/googleapiclient/discovery_cache/documents/run.v2.json +++ b/googleapiclient/discovery_cache/documents/run.v2.json @@ -1526,7 +1526,7 @@ } } }, -"revision": "20240916", +"revision": "20240927", "rootUrl": "https://run.googleapis.com/", "schemas": { "GoogleCloudRunV2BinaryAuthorization": { @@ -3258,6 +3258,10 @@ ], "type": "string" }, +"invokerIamDisabled": { +"description": "Optional. 
Disables IAM permission check for run.routes.invoke for callers of this service. This setting should not be used with external ingress.", +"type": "boolean" +}, "labels": { "additionalProperties": { "type": "string" @@ -3395,7 +3399,7 @@ "id": "GoogleCloudRunV2ServiceScaling", "properties": { "minInstanceCount": { -"description": "Optional. total min instances for the service. This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving. (BETA)", +"description": "Optional. total min instances for the service. This number of instances is divided among all revisions with specified traffic based on the percent of traffic they are receiving.", "format": "int32", "type": "integer" }, @@ -4309,11 +4313,13 @@ "description": "Optional. Option to specify how default logs buckets are setup.", "enum": [ "DEFAULT_LOGS_BUCKET_BEHAVIOR_UNSPECIFIED", -"REGIONAL_USER_OWNED_BUCKET" +"REGIONAL_USER_OWNED_BUCKET", +"LEGACY_BUCKET" ], "enumDescriptions": [ "Unspecified.", -"Bucket is located in user-owned project in the same region as the build. The builder service account must have access to create and write to Cloud Storage buckets in the build project." +"Bucket is located in user-owned project in the same region as the build. The builder service account must have access to create and write to Cloud Storage buckets in the build project.", +"Bucket is located in a Google-owned project and is not regionalized." 
], "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/searchads360.v0.json b/googleapiclient/discovery_cache/documents/searchads360.v0.json index 61703f93d09..6cddc058448 100644 --- a/googleapiclient/discovery_cache/documents/searchads360.v0.json +++ b/googleapiclient/discovery_cache/documents/searchads360.v0.json @@ -260,7 +260,7 @@ } } }, -"revision": "20240822", +"revision": "20241002", "rootUrl": "https://searchads360.googleapis.com/", "schemas": { "GoogleAdsSearchads360V0Common__AdScheduleInfo": { @@ -1170,11 +1170,21 @@ "format": "double", "type": "number" }, +"crossDeviceConversionsByConversionDate": { +"description": "The number of cross-device conversions by conversion date. Details for the by_conversion_date columns are available at https://support.google.com/sa360/answer/9250611.", +"format": "double", +"type": "number" +}, "crossDeviceConversionsValue": { "description": "The sum of the value of cross-device conversions.", "format": "double", "type": "number" }, +"crossDeviceConversionsValueByConversionDate": { +"description": "The sum of cross-device conversions value by conversion date. Details for the by_conversion_date columns are available at https://support.google.com/sa360/answer/9250611.", +"format": "double", +"type": "number" +}, "crossSellCostOfGoodsSoldMicros": { "description": "Cross-sell cost of goods sold (COGS) is the total cost of products sold as a result of advertising a different product. How it works: You report conversions with cart data for completed purchases on your website. If the ad that was interacted with before the purchase has an associated product (see Shopping Ads) then this product is considered the advertised product. Any product included in the order the customer places is a sold product. If these products don't match then this is considered cross-sell. Cross-sell cost of goods sold is the total cost of the products sold that weren't advertised. 
Example: Someone clicked on a Shopping ad for a hat then bought the same hat and a shirt. The hat has a cost of goods sold value of $3, the shirt has a cost of goods sold value of $5. The cross-sell cost of goods sold for this order is $5. This metric is only available if you report conversions with cart data. This metric is a monetary value and returned in the customer's currency by default. See the metrics_currency parameter at https://developers.google.com/search-ads/reporting/query/query-structure#parameters_clause", "format": "int64", @@ -1200,6 +1210,16 @@ "format": "double", "type": "number" }, +"generalInvalidClickRate": { +"description": "The percentage of clicks that have been filtered out of your total number of clicks (filtered + non-filtered clicks) due to being general invalid clicks. These are clicks Google considers illegitimate that are detected through routine means of filtration (that is, known invalid data-center traffic, bots and spiders or other crawlers, irregular patterns, etc). You're not charged for them, and they don't affect your account statistics. See the help page at https://support.google.com/campaignmanager/answer/6076504 for details.", +"format": "double", +"type": "number" +}, +"generalInvalidClicks": { +"description": "Number of general invalid clicks. These are a subset of your invalid clicks that are detected through routine means of filtration (such as known invalid data-center traffic, bots and spiders or other crawlers, irregular patterns, etc.). You're not charged for them, and they don't affect your account statistics. 
See the help page at https://support.google.com/campaignmanager/answer/6076504 for details.", +"format": "int64", +"type": "string" +}, "historicalCreativeQualityScore": { "description": "The creative historical quality score.", "enum": [ @@ -3384,7 +3404,7 @@ "id": "GoogleAdsSearchads360V0Resources_Campaign_SelectiveOptimization", "properties": { "conversionActions": { -"description": "The selected set of conversion actions for optimizing this campaign.", +"description": "The selected set of resource names for conversion actions for optimizing this campaign.", "items": { "type": "string" }, @@ -4020,6 +4040,14 @@ "readOnly": true, "type": "string" }, +"effectiveLabels": { +"description": "Output only. The resource names of effective labels attached to this ad group. An effective label is a label inherited or directly assigned to this ad group.", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +}, "endDate": { "description": "Output only. Date when the ad group ends serving ads. By default, the ad group ends on the ad group's end date. If this field is set, then the ad group ends at the end of the specified date in the customer's time zone. This field is only available for Microsoft Advertising and Facebook gateway accounts. Format: YYYY-MM-DD Example: 2019-03-14", "readOnly": true, @@ -4761,6 +4789,31 @@ false }, "type": "object" }, +"GoogleAdsSearchads360V0Resources__AdGroupEffectiveLabel": { +"description": "A relationship between an ad group and an effective label. An effective label is a label inherited or directly assigned to this ad group.", +"id": "GoogleAdsSearchads360V0Resources__AdGroupEffectiveLabel", +"properties": { +"adGroup": { +"description": "Immutable. The ad group to which the effective label is attached.", +"type": "string" +}, +"label": { +"description": "Immutable. The effective label assigned to the ad group.", +"type": "string" +}, +"ownerCustomerId": { +"description": "Output only. 
The ID of the Customer which owns the effective label.", +"format": "int64", +"readOnly": true, +"type": "string" +}, +"resourceName": { +"description": "Immutable. The resource name of the ad group effective label. Ad group effective label resource names have the form: `customers/{customer_id}/adGroupEffectiveLabels/{ad_group_id}~{label_id}`", +"type": "string" +} +}, +"type": "object" +}, "GoogleAdsSearchads360V0Resources__AdGroupLabel": { "description": "A relationship between an ad group and a label.", "id": "GoogleAdsSearchads360V0Resources__AdGroupLabel", @@ -5651,7 +5704,7 @@ false "type": "string" }, "biddingStrategy": { -"description": "Portfolio bidding strategy used by campaign.", +"description": "The resource name of the portfolio bidding strategy used by the campaign.", "type": "string" }, "biddingStrategySystemStatus": { @@ -5765,7 +5818,7 @@ false "type": "string" }, "campaignBudget": { -"description": "The budget of the campaign.", +"description": "The resource name of the campaign budget of the campaign.", "type": "string" }, "createTime": { @@ -5782,6 +5835,14 @@ false "$ref": "GoogleAdsSearchads360V0Resources_Campaign_DynamicSearchAdsSetting", "description": "The setting for controlling Dynamic Search Ads (DSA)." }, +"effectiveLabels": { +"description": "Output only. The resource names of effective labels attached to this campaign. An effective label is a label inherited or directly assigned to this campaign.", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +}, "endDate": { "description": "The last day of the campaign in serving customer's timezone in YYYY-MM-DD format. On create, defaults to 2037-12-30, which means the campaign will run indefinitely. 
To set an existing campaign to run indefinitely, set this field to 2037-12-30.", "type": "string" @@ -6351,6 +6412,31 @@ false }, "type": "object" }, +"GoogleAdsSearchads360V0Resources__CampaignEffectiveLabel": { +"description": "Represents a relationship between a campaign and an effective label. An effective label is a label inherited or directly assigned to this campaign.", +"id": "GoogleAdsSearchads360V0Resources__CampaignEffectiveLabel", +"properties": { +"campaign": { +"description": "Immutable. The campaign to which the effective label is attached.", +"type": "string" +}, +"label": { +"description": "Immutable. The effective label assigned to the campaign.", +"type": "string" +}, +"ownerCustomerId": { +"description": "Output only. The ID of the Customer which owns the effective label.", +"format": "int64", +"readOnly": true, +"type": "string" +}, +"resourceName": { +"description": "Immutable. Name of the resource. CampaignEffectivelabel resource names have the form: `customers/{customer_id}/campaignEffectiveLabels/{campaign_id}~{label_id}`", +"type": "string" +} +}, +"type": "object" +}, "GoogleAdsSearchads360V0Resources__CampaignLabel": { "description": "Represents a relationship between a campaign and a label.", "id": "GoogleAdsSearchads360V0Resources__CampaignLabel", @@ -8339,6 +8425,10 @@ false "$ref": "GoogleAdsSearchads360V0Resources__AdGroupCriterionLabel", "description": "The ad group criterion label referenced in the query." }, +"adGroupEffectiveLabel": { +"$ref": "GoogleAdsSearchads360V0Resources__AdGroupEffectiveLabel", +"description": "The ad group effective label referenced in the query." +}, "adGroupLabel": { "$ref": "GoogleAdsSearchads360V0Resources__AdGroupLabel", "description": "The ad group label referenced in the query." @@ -8411,6 +8501,10 @@ false "$ref": "GoogleAdsSearchads360V0Resources__CampaignCriterion", "description": "The campaign criterion referenced in the query." 
}, +"campaignEffectiveLabel": { +"$ref": "GoogleAdsSearchads360V0Resources__CampaignEffectiveLabel", +"description": "The campaign effective label referenced in the query." +}, "campaignLabel": { "$ref": "GoogleAdsSearchads360V0Resources__CampaignLabel", "description": "The campaign label referenced in the query." diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1.json b/googleapiclient/discovery_cache/documents/securitycenter.v1.json index 91c463077dc..954af7dd4cb 100644 --- a/googleapiclient/discovery_cache/documents/securitycenter.v1.json +++ b/googleapiclient/discovery_cache/documents/securitycenter.v1.json @@ -732,39 +732,6 @@ "resources": { "muteConfigs": { "methods": { -"create": { -"description": "Creates a mute config.", -"flatPath": "v1/folders/{foldersId}/locations/{locationsId}/muteConfigs", -"httpMethod": "POST", -"id": "securitycenter.folders.locations.muteConfigs.create", -"parameterOrder": [ -"parent" -], -"parameters": { -"muteConfigId": { -"description": "Required. Unique identifier provided by the client within the parent scope. It must consist of only lowercase letters, numbers, and hyphens, must start with a letter, must end with either a letter or a number, and must be 63 characters or less.", -"location": "query", -"type": "string" -}, -"parent": { -"description": "Required. Resource name of the new mute configs's parent. 
Its format is `organizations/[organization_id]`, `folders/[folder_id]`, or `projects/[project_id]`.", -"location": "path", -"pattern": "^folders/[^/]+/locations/[^/]+$", -"required": true, -"type": "string" -} -}, -"path": "v1/{+parent}/muteConfigs", -"request": { -"$ref": "GoogleCloudSecuritycenterV1MuteConfig" -}, -"response": { -"$ref": "GoogleCloudSecuritycenterV1MuteConfig" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, "delete": { "description": "Deletes an existing mute config.", "flatPath": "v1/folders/{foldersId}/locations/{locationsId}/muteConfigs/{muteConfigsId}", @@ -815,42 +782,6 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, -"list": { -"description": "Lists mute configs.", -"flatPath": "v1/folders/{foldersId}/locations/{locationsId}/muteConfigs", -"httpMethod": "GET", -"id": "securitycenter.folders.locations.muteConfigs.list", -"parameterOrder": [ -"parent" -], -"parameters": { -"pageSize": { -"description": "The maximum number of configs to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", -"format": "int32", -"location": "query", -"type": "integer" -}, -"pageToken": { -"description": "A page token, received from a previous `ListMuteConfigs` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListMuteConfigs` must match the call that provided the page token.", -"location": "query", -"type": "string" -}, -"parent": { -"description": "Required. The parent, which owns the collection of mute configs. 
Its format is `organizations/[organization_id]`, `folders/[folder_id]`, `projects/[project_id]`.", -"location": "path", -"pattern": "^folders/[^/]+/locations/[^/]+/muteConfigs$", -"required": true, -"type": "string" -} -}, -"path": "v1/{+parent}", -"response": { -"$ref": "ListMuteConfigsResponse" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, "patch": { "description": "Updates a mute config.", "flatPath": "v1/folders/{foldersId}/locations/{locationsId}/muteConfigs/{muteConfigsId}", @@ -2508,39 +2439,6 @@ "resources": { "muteConfigs": { "methods": { -"create": { -"description": "Creates a mute config.", -"flatPath": "v1/organizations/{organizationsId}/locations/{locationsId}/muteConfigs", -"httpMethod": "POST", -"id": "securitycenter.organizations.locations.muteConfigs.create", -"parameterOrder": [ -"parent" -], -"parameters": { -"muteConfigId": { -"description": "Required. Unique identifier provided by the client within the parent scope. It must consist of only lowercase letters, numbers, and hyphens, must start with a letter, must end with either a letter or a number, and must be 63 characters or less.", -"location": "query", -"type": "string" -}, -"parent": { -"description": "Required. Resource name of the new mute configs's parent. 
Its format is `organizations/[organization_id]`, `folders/[folder_id]`, or `projects/[project_id]`.", -"location": "path", -"pattern": "^organizations/[^/]+/locations/[^/]+$", -"required": true, -"type": "string" -} -}, -"path": "v1/{+parent}/muteConfigs", -"request": { -"$ref": "GoogleCloudSecuritycenterV1MuteConfig" -}, -"response": { -"$ref": "GoogleCloudSecuritycenterV1MuteConfig" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, "delete": { "description": "Deletes an existing mute config.", "flatPath": "v1/organizations/{organizationsId}/locations/{locationsId}/muteConfigs/{muteConfigsId}", @@ -2591,42 +2489,6 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, -"list": { -"description": "Lists mute configs.", -"flatPath": "v1/organizations/{organizationsId}/locations/{locationsId}/muteConfigs", -"httpMethod": "GET", -"id": "securitycenter.organizations.locations.muteConfigs.list", -"parameterOrder": [ -"parent" -], -"parameters": { -"pageSize": { -"description": "The maximum number of configs to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", -"format": "int32", -"location": "query", -"type": "integer" -}, -"pageToken": { -"description": "A page token, received from a previous `ListMuteConfigs` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListMuteConfigs` must match the call that provided the page token.", -"location": "query", -"type": "string" -}, -"parent": { -"description": "Required. The parent, which owns the collection of mute configs. 
Its format is `organizations/[organization_id]`, `folders/[folder_id]`, `projects/[project_id]`.", -"location": "path", -"pattern": "^organizations/[^/]+/locations/[^/]+/muteConfigs$", -"required": true, -"type": "string" -} -}, -"path": "v1/{+parent}", -"response": { -"$ref": "ListMuteConfigsResponse" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, "patch": { "description": "Updates a mute config.", "flatPath": "v1/organizations/{organizationsId}/locations/{locationsId}/muteConfigs/{muteConfigsId}", @@ -5017,39 +4879,6 @@ "resources": { "muteConfigs": { "methods": { -"create": { -"description": "Creates a mute config.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/muteConfigs", -"httpMethod": "POST", -"id": "securitycenter.projects.locations.muteConfigs.create", -"parameterOrder": [ -"parent" -], -"parameters": { -"muteConfigId": { -"description": "Required. Unique identifier provided by the client within the parent scope. It must consist of only lowercase letters, numbers, and hyphens, must start with a letter, must end with either a letter or a number, and must be 63 characters or less.", -"location": "query", -"type": "string" -}, -"parent": { -"description": "Required. Resource name of the new mute configs's parent. 
Its format is `organizations/[organization_id]`, `folders/[folder_id]`, or `projects/[project_id]`.", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+$", -"required": true, -"type": "string" -} -}, -"path": "v1/{+parent}/muteConfigs", -"request": { -"$ref": "GoogleCloudSecuritycenterV1MuteConfig" -}, -"response": { -"$ref": "GoogleCloudSecuritycenterV1MuteConfig" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, "delete": { "description": "Deletes an existing mute config.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/muteConfigs/{muteConfigsId}", @@ -5100,42 +4929,6 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, -"list": { -"description": "Lists mute configs.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/muteConfigs", -"httpMethod": "GET", -"id": "securitycenter.projects.locations.muteConfigs.list", -"parameterOrder": [ -"parent" -], -"parameters": { -"pageSize": { -"description": "The maximum number of configs to return. The service may return fewer than this value. If unspecified, at most 10 configs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", -"format": "int32", -"location": "query", -"type": "integer" -}, -"pageToken": { -"description": "A page token, received from a previous `ListMuteConfigs` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListMuteConfigs` must match the call that provided the page token.", -"location": "query", -"type": "string" -}, -"parent": { -"description": "Required. The parent, which owns the collection of mute configs. 
Its format is `organizations/[organization_id]`, `folders/[folder_id]`, `projects/[project_id]`.", -"location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+/muteConfigs$", -"required": true, -"type": "string" -} -}, -"path": "v1/{+parent}", -"response": { -"$ref": "ListMuteConfigsResponse" -}, -"scopes": [ -"https://www.googleapis.com/auth/cloud-platform" -] -}, "patch": { "description": "Updates a mute config.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/muteConfigs/{muteConfigsId}", @@ -6084,7 +5877,7 @@ } } }, -"revision": "20240916", +"revision": "20240926", "rootUrl": "https://securitycenter.googleapis.com/", "schemas": { "Access": { @@ -6630,6 +6423,10 @@ "description": "Represents an Azure resource group.", "id": "AzureResourceGroup", "properties": { +"id": { +"description": "The ID of the Azure resource group.", +"type": "string" +}, "name": { "description": "The name of the Azure resource group. This is not a UUID.", "type": "string" @@ -6804,6 +6601,17 @@ }, "type": "object" }, +"CelPolicySpec": { +"description": "YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: \"compute.googleapis.com/Instance\" - resource_types: \"compute.googleapis.com/Firewall\" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies.", +"id": "CelPolicySpec", +"properties": { +"spec": { +"description": "The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false.", +"type": "string" +} +}, +"type": "object" +}, "CloudArmor": { "description": "Fields related to Google Cloud Armor findings.", "id": "CloudArmor", @@ -8242,6 +8050,10 @@ "description": "Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify.", "id": "GoogleCloudSecuritycenterV1CustomConfig", "properties": { +"celPolicy": { +"$ref": "CelPolicySpec", +"description": "The CEL policy spec attached to the custom module." +}, "customOutput": { "$ref": "GoogleCloudSecuritycenterV1CustomOutputSpec", "description": "Custom output properties." 
@@ -9324,6 +9136,10 @@ "description": "Represents an Azure resource group.", "id": "GoogleCloudSecuritycenterV2AzureResourceGroup", "properties": { +"id": { +"description": "The ID of the Azure resource group.", +"type": "string" +}, "name": { "description": "The name of the Azure resource group. This is not a UUID.", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json b/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json index 36bd6b1165f..eccb9c40d16 100644 --- a/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json @@ -903,7 +903,7 @@ } } }, -"revision": "20240916", +"revision": "20240926", "rootUrl": "https://securitycenter.googleapis.com/", "schemas": { "Access": { @@ -1313,6 +1313,10 @@ "description": "Represents an Azure resource group.", "id": "AzureResourceGroup", "properties": { +"id": { +"description": "The ID of the Azure resource group.", +"type": "string" +}, "name": { "description": "The name of the Azure resource group. This is not a UUID.", "type": "string" @@ -1435,6 +1439,17 @@ "properties": {}, "type": "object" }, +"CelPolicySpec": { +"description": "YAML-based rule that uses CEL, which supports the declaration of variables and a filtering predicate. A vulnerable resource is emitted if the evaluation is false. 
Given: 1) the resource types as: - resource_types: \"compute.googleapis.com/Instance\" - resource_types: \"compute.googleapis.com/Firewall\" 2) the CEL policy spec as: name: bad_instance resource_filters: - name: instance resource_type: compute.googleapis.com/Instance filter: > instance.status == 'RUNNING' && 'public' in instance.tags.items - name: firewall resource_type: compute.googleapis.com/Firewall filter: > firewall.direction == 'INGRESS' && !firewall.disabled && firewall.allowed.exists(rule, rule.IPProtocol.upperAscii() in ['TCP', 'ALL'] && rule.ports.exists(port, network.portsInRange(port, '11-256'))) rule: match: - predicate: > instance.networkInterfaces.exists(net, firewall.network == net.network) output: > {'message': 'Compute instance with publicly accessible ports', 'instance': instance.name} Users are able to join resource types together using the exact format as Kubernetes Validating Admission policies.", +"id": "CelPolicySpec", +"properties": { +"spec": { +"description": "The CEL policy to evaluate to produce findings. A finding is generated when the policy validation evaluates to false.", +"type": "string" +} +}, +"type": "object" +}, "CloudArmor": { "description": "Fields related to Google Cloud Armor findings.", "id": "CloudArmor", @@ -2708,6 +2723,10 @@ "description": "Defines the properties in a custom module configuration for Security Health Analytics. Use the custom module configuration to create custom detectors that generate custom findings for resources that you specify.", "id": "GoogleCloudSecuritycenterV1CustomConfig", "properties": { +"celPolicy": { +"$ref": "CelPolicySpec", +"description": "The CEL policy spec attached to the custom module." +}, "customOutput": { "$ref": "GoogleCloudSecuritycenterV1CustomOutputSpec", "description": "Custom output properties." 
@@ -3871,6 +3890,10 @@ "description": "Represents an Azure resource group.", "id": "GoogleCloudSecuritycenterV2AzureResourceGroup", "properties": { +"id": { +"description": "The ID of the Azure resource group.", +"type": "string" +}, "name": { "description": "The name of the Azure resource group. This is not a UUID.", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json b/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json index c370e479ca4..856b7f65da3 100644 --- a/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json @@ -1993,7 +1993,7 @@ } } }, -"revision": "20240918", +"revision": "20240927", "rootUrl": "https://securitycenter.googleapis.com/", "schemas": { "Access": { diff --git a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json index 1c39f33b396..751dff105b1 100644 --- a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json @@ -542,7 +542,7 @@ } } }, -"revision": "20240919", +"revision": "20240929", "rootUrl": "https://serviceconsumermanagement.googleapis.com/", "schemas": { "AddTenantProjectRequest": { @@ -2037,7 +2037,7 @@ "type": "object" }, "Mixin": { -"description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. 
- If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", +"description": "Declares an API Interface to be included in this interface. 
The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. 
Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", "id": "Mixin", "properties": { "name": { diff --git a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json index 212e8e7424f..375678dd8d2 100644 --- a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json @@ -715,7 +715,7 @@ } } }, -"revision": "20240919", +"revision": "20240929", "rootUrl": "https://serviceconsumermanagement.googleapis.com/", "schemas": { "Api": { @@ -2086,7 +2086,7 @@ "type": "object" }, "Mixin": { -"description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. 
rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", +"description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. 
rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... 
}", "id": "Mixin", "properties": { "name": { diff --git a/googleapiclient/discovery_cache/documents/servicemanagement.v1.json b/googleapiclient/discovery_cache/documents/servicemanagement.v1.json index c00d02735cc..a5e8c5b6dcd 100644 --- a/googleapiclient/discovery_cache/documents/servicemanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/servicemanagement.v1.json @@ -830,7 +830,7 @@ } } }, -"revision": "20240920", +"revision": "20240927", "rootUrl": "https://servicemanagement.googleapis.com/", "schemas": { "Advice": { @@ -2636,7 +2636,7 @@ "type": "object" }, "Mixin": { -"description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. 
A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", +"description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. 
rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", "id": "Mixin", "properties": { "name": { diff --git a/googleapiclient/discovery_cache/documents/serviceusage.v1.json b/googleapiclient/discovery_cache/documents/serviceusage.v1.json index df446e51610..7041614482c 100644 --- a/googleapiclient/discovery_cache/documents/serviceusage.v1.json +++ b/googleapiclient/discovery_cache/documents/serviceusage.v1.json @@ -426,7 +426,7 @@ } } }, -"revision": "20240919", +"revision": "20240929", "rootUrl": "https://serviceusage.googleapis.com/", "schemas": { "AddEnableRulesMetadata": { @@ -2675,7 +2675,7 @@ "type": "object" }, "Mixin": { -"description": "Declares an API Interface to be included in this interface. 
The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. 
Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", +"description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. 
rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", "id": "Mixin", "properties": { "name": { diff --git a/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json b/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json index ec6e79e88c9..fc162c11e0f 100644 --- a/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json @@ -964,7 +964,7 @@ } } }, -"revision": "20240919", +"revision": "20240929", "rootUrl": "https://serviceusage.googleapis.com/", "schemas": { "AddEnableRulesMetadata": { @@ -3370,7 +3370,7 @@ "type": "object" }, "Mixin": { -"description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. - Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. 
Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inherting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... }", +"description": "Declares an API Interface to be included in this interface. The including interface must redeclare all the methods from the included interface, but documentation and options are inherited as follows: - If after comment and whitespace stripping, the documentation string of the redeclared method is empty, it will be inherited from the original method. 
- Each annotation belonging to the service config (http, visibility) which is not set in the redeclared method will be inherited. - If an http annotation is inherited, the path pattern will be modified as follows. Any version prefix will be replaced by the version of the including interface plus the root path if specified. Example of a simple mixin: package google.acl.v1; service AccessControl { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v1/{resource=**}:getAcl\"; } } package google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest) returns (Acl); // Get a data record. rpc GetData(GetDataRequest) returns (Data) { option (google.api.http).get = \"/v2/{resource=**}\"; } } Example of a mixin configuration: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl The mixin construct implies that all methods in `AccessControl` are also declared with same name and request/response types in `Storage`. A documentation generator or annotation processor will see the effective `Storage.GetAcl` method after inheriting documentation and annotations as follows: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/{resource=**}:getAcl\"; } ... } Note how the version in the path pattern changed from `v1` to `v2`. If the `root` field in the mixin is specified, it should be a relative path under which inherited HTTP paths are placed. Example: apis: - name: google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl root: acls This implies the following inherited HTTP annotation: service Storage { // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\"; } ... 
}", "id": "Mixin", "properties": { "name": { diff --git a/googleapiclient/discovery_cache/documents/speech.v1.json b/googleapiclient/discovery_cache/documents/speech.v1.json index 2c5dda0b7c7..d78bcb7ae06 100644 --- a/googleapiclient/discovery_cache/documents/speech.v1.json +++ b/googleapiclient/discovery_cache/documents/speech.v1.json @@ -524,7 +524,7 @@ } } }, -"revision": "20240625", +"revision": "20240926", "rootUrl": "https://speech.googleapis.com/", "schemas": { "ABNFGrammar": { @@ -1044,7 +1044,8 @@ "OGG_OPUS", "SPEEX_WITH_HEADER_BYTE", "MP3", -"WEBM_OPUS" +"WEBM_OPUS", +"ALAW" ], "enumDescriptions": [ "Not specified.", @@ -1056,7 +1057,8 @@ "Opus encoded audio frames in Ogg container ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.", "Although the use of lossy encodings is not recommended, if a very low bitrate encoding is required, `OGG_OPUS` is highly preferred over Speex encoding. The [Speex](https://speex.org/) encoding supported by Cloud Speech API has a header byte in each block, as in MIME type `audio/x-speex-with-header-byte`. It is a variant of the RTP Speex encoding defined in [RFC 5574](https://tools.ietf.org/html/rfc5574). The stream is a sequence of blocks, one block per RTP packet. Each block starts with a byte containing the length of the block, in bytes, followed by one or more frames of Speex data, padded to an integral number of bytes (octets) as specified in RFC 5574. In other words, each RTP header is replaced with a single byte containing the block length. Only Speex wideband is supported. `sample_rate_hertz` must be 16000.", "MP3 audio. MP3 encoding is a Beta feature and only available in v1p1beta1. Support all standard MP3 bitrates (which range from 32-320 kbps). When using this encoding, `sample_rate_hertz` has to match the sample rate of the file being used.", -"Opus encoded audio frames in WebM container ([WebM](https://www.webmproject.org/docs/container/)). 
`sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000." +"Opus encoded audio frames in WebM container ([WebM](https://www.webmproject.org/docs/container/)). `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.", +"8-bit samples that compand 13-bit audio samples using G.711 PCMU/a-law." ], "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/speech.v1p1beta1.json b/googleapiclient/discovery_cache/documents/speech.v1p1beta1.json index e6bb7f39c20..0a3d81c938e 100644 --- a/googleapiclient/discovery_cache/documents/speech.v1p1beta1.json +++ b/googleapiclient/discovery_cache/documents/speech.v1p1beta1.json @@ -524,7 +524,7 @@ } } }, -"revision": "20240625", +"revision": "20240926", "rootUrl": "https://speech.googleapis.com/", "schemas": { "ABNFGrammar": { @@ -1060,7 +1060,8 @@ "OGG_OPUS", "SPEEX_WITH_HEADER_BYTE", "MP3", -"WEBM_OPUS" +"WEBM_OPUS", +"ALAW" ], "enumDescriptions": [ "Not specified.", @@ -1072,7 +1073,8 @@ "Opus encoded audio frames in Ogg container ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.", "Although the use of lossy encodings is not recommended, if a very low bitrate encoding is required, `OGG_OPUS` is highly preferred over Speex encoding. The [Speex](https://speex.org/) encoding supported by Cloud Speech API has a header byte in each block, as in MIME type `audio/x-speex-with-header-byte`. It is a variant of the RTP Speex encoding defined in [RFC 5574](https://tools.ietf.org/html/rfc5574). The stream is a sequence of blocks, one block per RTP packet. Each block starts with a byte containing the length of the block, in bytes, followed by one or more frames of Speex data, padded to an integral number of bytes (octets) as specified in RFC 5574. In other words, each RTP header is replaced with a single byte containing the block length. Only Speex wideband is supported. `sample_rate_hertz` must be 16000.", "MP3 audio. 
MP3 encoding is a Beta feature and only available in v1p1beta1. Support all standard MP3 bitrates (which range from 32-320 kbps). When using this encoding, `sample_rate_hertz` has to match the sample rate of the file being used.", -"Opus encoded audio frames in WebM container ([WebM](https://www.webmproject.org/docs/container/)). `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000." +"Opus encoded audio frames in WebM container ([WebM](https://www.webmproject.org/docs/container/)). `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.", +"8-bit samples that compand 13-bit audio samples using G.711 PCMU/a-law." ], "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/storagetransfer.v1.json b/googleapiclient/discovery_cache/documents/storagetransfer.v1.json index da8a89ba264..cce2d930146 100644 --- a/googleapiclient/discovery_cache/documents/storagetransfer.v1.json +++ b/googleapiclient/discovery_cache/documents/storagetransfer.v1.json @@ -392,7 +392,7 @@ ], "parameters": { "filter": { -"description": "Required. A list of query parameters specified as JSON text in the form of: `{\"projectId\":\"my_project_id\", \"jobNames\":[\"jobid1\",\"jobid2\",...], \"jobStatuses\":[\"status1\",\"status2\",...]}` Since `jobNames` and `jobStatuses` support multiple values, their values must be specified with array notation. `projectId` is required. `jobNames` and `jobStatuses` are optional. The valid values for `jobStatuses` are case-insensitive: ENABLED, DISABLED, and DELETED.", +"description": "Required. 
A list of query parameters specified as JSON text in the form of: ``` { \"projectId\":\"my_project_id\", \"jobNames\":[\"jobid1\",\"jobid2\",...], \"jobStatuses\":[\"status1\",\"status2\",...], \"dataBackend\":\"QUERY_REPLICATION_CONFIGS\", \"sourceBucket\":\"source-bucket-name\", \"sinkBucket\":\"sink-bucket-name\", } ``` The JSON formatting in the example is for display only; provide the query parameters without spaces or line breaks. * `projectId` is required. * Since `jobNames` and `jobStatuses` support multiple values, their values must be specified with array notation. `jobNames` and `jobStatuses` are optional. Valid values are case-insensitive: * ENABLED * DISABLED * DELETED * Specify `\"dataBackend\":\"QUERY_REPLICATION_CONFIGS\"` to return a list of cross-bucket replication jobs. * Limit the results to jobs from a particular bucket with `sourceBucket` and/or to a particular bucket with `sinkBucket`.", "location": "query", "required": true, "type": "string" @@ -632,7 +632,7 @@ } } }, -"revision": "20240705", +"revision": "20240928", "rootUrl": "https://storagetransfer.googleapis.com/", "schemas": { "AgentPool": { @@ -1368,24 +1368,24 @@ "type": "object" }, "ReplicationSpec": { -"description": "Specifies the configuration for running a replication job.", +"description": "Specifies the configuration for a cross-bucket replication job. Cross-bucket replication copies new or updated objects from a source Cloud Storage bucket to a destination Cloud Storage bucket. Existing objects in the source bucket are not copied by a new cross-bucket replication job.", "id": "ReplicationSpec", "properties": { "gcsDataSink": { "$ref": "GcsData", -"description": "Specifies cloud Storage data sink." +"description": "The Cloud Storage bucket to which to replicate objects." }, "gcsDataSource": { "$ref": "GcsData", -"description": "Specifies cloud Storage data source." +"description": "The Cloud Storage bucket from which to replicate objects." 
}, "objectConditions": { "$ref": "ObjectConditions", -"description": "Specifies the object conditions to only include objects that satisfy these conditions in the set of data source objects. Object conditions based on objects' \"last modification time\" do not exclude objects in a data sink." +"description": "Object conditions that determine which objects are transferred. For replication jobs, only `include_prefixes` and `exclude_prefixes` are supported." }, "transferOptions": { "$ref": "TransferOptions", -"description": "Specifies the actions to be performed on the object during replication. Delete options are not supported for replication and when specified, the request fails with an INVALID_ARGUMENT error." +"description": "Specifies the metadata options to be applied during replication. Delete options are not supported. If a delete option is specified, the request fails with an INVALID_ARGUMENT error." } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/texttospeech.v1.json b/googleapiclient/discovery_cache/documents/texttospeech.v1.json index 26a37ba4f28..bb71f9b6def 100644 --- a/googleapiclient/discovery_cache/documents/texttospeech.v1.json +++ b/googleapiclient/discovery_cache/documents/texttospeech.v1.json @@ -294,6 +294,24 @@ }, "voices": { "methods": { +"generateVoiceCloningKey": { +"description": "Generates voice clone key given a short voice prompt. 
This method validates the voice prompts with a series of checks against the voice talent statement to verify the voice clone is safe to generate.", +"flatPath": "v1/voices:generateVoiceCloningKey", +"httpMethod": "POST", +"id": "texttospeech.voices.generateVoiceCloningKey", +"parameterOrder": [], +"parameters": {}, +"path": "v1/voices:generateVoiceCloningKey", +"request": { +"$ref": "GenerateVoiceCloningKeyRequest" +}, +"response": { +"$ref": "GenerateVoiceCloningKeyResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, "list": { "description": "Returns a list of Voice supported for synthesis.", "flatPath": "v1/voices", @@ -318,9 +336,20 @@ } } }, -"revision": "20240815", +"revision": "20241001", "rootUrl": "https://texttospeech.googleapis.com/", "schemas": { +"AdvancedVoiceOptions": { +"description": "Used for advanced voice options.", +"id": "AdvancedVoiceOptions", +"properties": { +"lowLatencyJourneySynthesis": { +"description": "Only for Jounrney voices. If false, the synthesis will be context aware and have higher latency.", +"type": "boolean" +} +}, +"type": "object" +}, "AudioConfig": { "description": "Description of audio data to be synthesized.", "id": "AudioConfig", @@ -381,6 +410,49 @@ "properties": {}, "type": "object" }, +"CustomPronunciationParams": { +"description": "Pronunciation customization for a phrase.", +"id": "CustomPronunciationParams", +"properties": { +"phoneticEncoding": { +"description": "The phonetic encoding of the phrase.", +"enum": [ +"PHONETIC_ENCODING_UNSPECIFIED", +"PHONETIC_ENCODING_IPA", +"PHONETIC_ENCODING_X_SAMPA" +], +"enumDescriptions": [ +"Not specified.", +"IPA. (e.g. apple -> \u02c8\u00e6p\u0259l ) https://en.wikipedia.org/wiki/International_Phonetic_Alphabet", +"X-SAMPA (e.g. apple -> \"{p@l\" ) https://en.wikipedia.org/wiki/X-SAMPA" +], +"type": "string" +}, +"phrase": { +"description": "The phrase to which the customization will be applied. 
The phrase can be multiple words (in the case of proper nouns etc), but should not span to a whole sentence.", +"type": "string" +}, +"pronunciation": { +"description": "The pronunciation of the phrase. This must be in the phonetic encoding specified above.", +"type": "string" +} +}, +"type": "object" +}, +"CustomPronunciations": { +"description": "A collection of pronunciation customizations.", +"id": "CustomPronunciations", +"properties": { +"pronunciations": { +"description": "The pronunciation customizations to be applied.", +"items": { +"$ref": "CustomPronunciationParams" +}, +"type": "array" +} +}, +"type": "object" +}, "CustomVoiceParams": { "description": "Description of the custom voice to be synthesized.", "id": "CustomVoiceParams", @@ -413,6 +485,40 @@ "properties": {}, "type": "object" }, +"GenerateVoiceCloningKeyRequest": { +"description": "Request message for the `GenerateVoiceCloningKey` method.", +"id": "GenerateVoiceCloningKeyRequest", +"properties": { +"consentScript": { +"description": "Required. The script used for the voice talent statement. The script will be provided to the caller through other channels. It must be returned unchanged in this field.", +"type": "string" +}, +"languageCode": { +"description": "Required. The language of the supplied audio as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. Example: \"en-US\". See [Language Support](https://cloud.google.com/speech-to-text/docs/languages) for a list of the currently supported language codes.", +"type": "string" +}, +"referenceAudio": { +"$ref": "InputAudio", +"description": "Required. The training audio used to create voice clone. This is currently limited to LINEAR16 PCM WAV files mono audio with 24khz sample rate. This needs to be specified in [InputAudio.audio_config], other values will be explicitly rejected." +}, +"voiceTalentConsent": { +"$ref": "InputAudio", +"description": "Required. The voice talent audio used to verify consent to voice clone." 
+} +}, +"type": "object" +}, +"GenerateVoiceCloningKeyResponse": { +"description": "Response message for the `GenerateVoiceCloningKey` method.", +"id": "GenerateVoiceCloningKeyResponse", +"properties": { +"voiceCloningKey": { +"description": "The voice clone key. Use it in the SynthesizeSpeechRequest by setting [voice.voice_clone.voice_cloning_key].", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudTexttospeechV1SynthesizeLongAudioMetadata": { "description": "Metadata for response returned by the `SynthesizeLongAudio` method.", "id": "GoogleCloudTexttospeechV1SynthesizeLongAudioMetadata", @@ -436,6 +542,54 @@ }, "type": "object" }, +"InputAudio": { +"description": "Holds audio content and config.", +"id": "InputAudio", +"properties": { +"audioConfig": { +"$ref": "InputAudioConfig", +"description": "Required. Provides information that specifies how to process content." +}, +"content": { +"description": "Required. The audio data bytes encoded as specified in `InputAudioConfig`. Note: as with all bytes fields, proto buffers use a pure binary representation, whereas JSON representations use base64. Audio samples should be between 5-25 seconds in length.", +"format": "byte", +"type": "string" +} +}, +"type": "object" +}, +"InputAudioConfig": { +"description": "Description of inputted audio data.", +"id": "InputAudioConfig", +"properties": { +"audioEncoding": { +"description": "Required. The format of the audio byte stream.", +"enum": [ +"AUDIO_ENCODING_UNSPECIFIED", +"LINEAR16", +"MP3", +"OGG_OPUS", +"MULAW", +"ALAW" +], +"enumDescriptions": [ +"Not specified. Will return result google.rpc.Code.INVALID_ARGUMENT.", +"Uncompressed 16-bit signed little-endian samples (Linear PCM). Audio content returned as LINEAR16 also contains a WAV header.", +"MP3 audio at 32kbps.", +"Opus encoded audio wrapped in an ogg container. The result will be a file which can be played natively on Android, and in browsers (at least Chrome and Firefox). 
The quality of the encoding is considerably higher than MP3 while using approximately the same bitrate.", +"8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law. Audio content returned as MULAW also contains a WAV header.", +"8-bit samples that compand 14-bit audio samples using G.711 PCMU/A-law. Audio content returned as ALAW also contains a WAV header." +], +"type": "string" +}, +"sampleRateHertz": { +"description": "Required. The sample rate (in hertz) for this audio.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "ListOperationsResponse": { "description": "The response message for Operations.ListOperations.", "id": "ListOperationsResponse", @@ -534,6 +688,10 @@ "description": "Contains text input to be synthesized. Either `text` or `ssml` must be supplied. Supplying both or neither returns google.rpc.Code.INVALID_ARGUMENT. The input size is limited to 5000 bytes.", "id": "SynthesisInput", "properties": { +"customPronunciations": { +"$ref": "CustomPronunciations", +"description": "Optional. The pronunciation customizations to be applied to the input. If this is set, the input will be synthesized using the given pronunciation customizations. The initial support will be for EFIGS (English, French, Italian, German, Spanish) languages, as provided in VoiceSelectionParams. Journey and Instant Clone voices are not supported yet. In order to customize the pronunciation of a phrase, there must be an exact match of the phrase in the input types. If using SSML, the phrase must not be inside a phoneme tag (entirely or partially)." +}, "ssml": { "description": "The SSML document to be synthesized. The SSML document must be valid and well-formed. Otherwise the RPC will fail and return google.rpc.Code.INVALID_ARGUMENT. 
For more information, see [SSML](https://cloud.google.com/text-to-speech/docs/ssml).", "type": "string" @@ -595,6 +753,10 @@ "description": "The top-level message sent by the client for the `SynthesizeSpeech` method.", "id": "SynthesizeSpeechRequest", "properties": { +"advancedVoiceOptions": { +"$ref": "AdvancedVoiceOptions", +"description": "Adnanced voice options." +}, "audioConfig": { "$ref": "AudioConfig", "description": "Required. The configuration of the synthesized audio." @@ -661,6 +823,17 @@ }, "type": "object" }, +"VoiceCloneParams": { +"description": "The configuration of Voice Clone feature.", +"id": "VoiceCloneParams", +"properties": { +"voiceCloningKey": { +"description": "Required. Created by GenerateVoiceCloningKey.", +"type": "string" +} +}, +"type": "object" +}, "VoiceSelectionParams": { "description": "Description of which voice to use for a synthesis request.", "id": "VoiceSelectionParams", @@ -692,6 +865,10 @@ "A gender-neutral voice. This voice is not yet supported." ], "type": "string" +}, +"voiceClone": { +"$ref": "VoiceCloneParams", +"description": "Optional. The configuration for a voice clone. If [VoiceCloneParams.voice_clone_key] is set, the service will choose the voice clone matching the specified configuration." } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json b/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json index 49423163da4..5c3893e6595 100644 --- a/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json @@ -237,6 +237,24 @@ }, "voices": { "methods": { +"generateVoiceCloningKey": { +"description": "Generates voice clone key given a short voice prompt. 
This method validates the voice prompts with a series of checks against the voice talent statement to verify the voice clone is safe to generate.", +"flatPath": "v1beta1/voices:generateVoiceCloningKey", +"httpMethod": "POST", +"id": "texttospeech.voices.generateVoiceCloningKey", +"parameterOrder": [], +"parameters": {}, +"path": "v1beta1/voices:generateVoiceCloningKey", +"request": { +"$ref": "GenerateVoiceCloningKeyRequest" +}, +"response": { +"$ref": "GenerateVoiceCloningKeyResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, "list": { "description": "Returns a list of Voice supported for synthesis.", "flatPath": "v1beta1/voices", @@ -261,9 +279,20 @@ } } }, -"revision": "20240815", +"revision": "20241001", "rootUrl": "https://texttospeech.googleapis.com/", "schemas": { +"AdvancedVoiceOptions": { +"description": "Used for advanced voice options.", +"id": "AdvancedVoiceOptions", +"properties": { +"lowLatencyJourneySynthesis": { +"description": "Only for Jounrney voices. If false, the synthesis will be context aware and have higher latency.", +"type": "boolean" +} +}, +"type": "object" +}, "AudioConfig": { "description": "Description of audio data to be synthesized.", "id": "AudioConfig", @@ -320,6 +349,49 @@ }, "type": "object" }, +"CustomPronunciationParams": { +"description": "Pronunciation customization for a phrase.", +"id": "CustomPronunciationParams", +"properties": { +"phoneticEncoding": { +"description": "The phonetic encoding of the phrase.", +"enum": [ +"PHONETIC_ENCODING_UNSPECIFIED", +"PHONETIC_ENCODING_IPA", +"PHONETIC_ENCODING_X_SAMPA" +], +"enumDescriptions": [ +"Not specified.", +"IPA. (e.g. apple -> \u02c8\u00e6p\u0259l ) https://en.wikipedia.org/wiki/International_Phonetic_Alphabet", +"X-SAMPA (e.g. apple -> \"{p@l\" ) https://en.wikipedia.org/wiki/X-SAMPA" +], +"type": "string" +}, +"phrase": { +"description": "The phrase to which the customization will be applied. 
The phrase can be multiple words (in the case of proper nouns etc), but should not span to a whole sentence.", +"type": "string" +}, +"pronunciation": { +"description": "The pronunciation of the phrase. This must be in the phonetic encoding specified above.", +"type": "string" +} +}, +"type": "object" +}, +"CustomPronunciations": { +"description": "A collection of pronunciation customizations.", +"id": "CustomPronunciations", +"properties": { +"pronunciations": { +"description": "The pronunciation customizations to be applied.", +"items": { +"$ref": "CustomPronunciationParams" +}, +"type": "array" +} +}, +"type": "object" +}, "CustomVoiceParams": { "description": "Description of the custom voice to be synthesized.", "id": "CustomVoiceParams", @@ -346,6 +418,40 @@ }, "type": "object" }, +"GenerateVoiceCloningKeyRequest": { +"description": "Request message for the `GenerateVoiceCloningKey` method.", +"id": "GenerateVoiceCloningKeyRequest", +"properties": { +"consentScript": { +"description": "Required. The script used for the voice talent statement. The script will be provided to the caller through other channels. It must be returned unchanged in this field.", +"type": "string" +}, +"languageCode": { +"description": "Required. The language of the supplied audio as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. Example: \"en-US\". See [Language Support](https://cloud.google.com/speech-to-text/docs/languages) for a list of the currently supported language codes.", +"type": "string" +}, +"referenceAudio": { +"$ref": "InputAudio", +"description": "Required. The training audio used to create voice clone. This is currently limited to LINEAR16 PCM WAV files mono audio with 24khz sample rate. This needs to be specified in [InputAudio.audio_config], other values will be explicitly rejected." +}, +"voiceTalentConsent": { +"$ref": "InputAudio", +"description": "Required. The voice talent audio used to verify consent to voice clone." 
+} +}, +"type": "object" +}, +"GenerateVoiceCloningKeyResponse": { +"description": "Response message for the `GenerateVoiceCloningKey` method.", +"id": "GenerateVoiceCloningKeyResponse", +"properties": { +"voiceCloningKey": { +"description": "The voice clone key. Use it in the SynthesizeSpeechRequest by setting [voice.voice_clone.voice_cloning_key].", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudTexttospeechV1beta1SynthesizeLongAudioMetadata": { "description": "Metadata for response returned by the `SynthesizeLongAudio` method.", "id": "GoogleCloudTexttospeechV1beta1SynthesizeLongAudioMetadata", @@ -369,6 +475,56 @@ }, "type": "object" }, +"InputAudio": { +"description": "Holds audio content and config.", +"id": "InputAudio", +"properties": { +"audioConfig": { +"$ref": "InputAudioConfig", +"description": "Required. Provides information that specifies how to process content." +}, +"content": { +"description": "Required. The audio data bytes encoded as specified in `InputAudioConfig`. Note: as with all bytes fields, proto buffers use a pure binary representation, whereas JSON representations use base64. Audio samples should be between 5-25 seconds in length.", +"format": "byte", +"type": "string" +} +}, +"type": "object" +}, +"InputAudioConfig": { +"description": "Description of inputted audio data.", +"id": "InputAudioConfig", +"properties": { +"audioEncoding": { +"description": "Required. The format of the audio byte stream.", +"enum": [ +"AUDIO_ENCODING_UNSPECIFIED", +"LINEAR16", +"MP3", +"MP3_64_KBPS", +"OGG_OPUS", +"MULAW", +"ALAW" +], +"enumDescriptions": [ +"Not specified. Will return result google.rpc.Code.INVALID_ARGUMENT.", +"Uncompressed 16-bit signed little-endian samples (Linear PCM). Audio content returned as LINEAR16 also contains a WAV header.", +"MP3 audio at 32kbps.", +"MP3 at 64kbps.", +"Opus encoded audio wrapped in an ogg container. 
The result will be a file which can be played natively on Android, and in browsers (at least Chrome and Firefox). The quality of the encoding is considerably higher than MP3 while using approximately the same bitrate.", +"8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law. Audio content returned as MULAW also contains a WAV header.", +"8-bit samples that compand 14-bit audio samples using G.711 PCMU/A-law. Audio content returned as ALAW also contains a WAV header." +], +"type": "string" +}, +"sampleRateHertz": { +"description": "Required. The sample rate (in hertz) for this audio.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "ListOperationsResponse": { "description": "The response message for Operations.ListOperations.", "id": "ListOperationsResponse", @@ -467,6 +623,10 @@ "description": "Contains text input to be synthesized. Either `text` or `ssml` must be supplied. Supplying both or neither returns google.rpc.Code.INVALID_ARGUMENT. The input size is limited to 5000 bytes.", "id": "SynthesisInput", "properties": { +"customPronunciations": { +"$ref": "CustomPronunciations", +"description": "Optional. The pronunciation customizations to be applied to the input. If this is set, the input will be synthesized using the given pronunciation customizations. The initial support will be for EFIGS (English, French, Italian, German, Spanish) languages, as provided in VoiceSelectionParams. Journey and Instant Clone voices are not supported yet. In order to customize the pronunciation of a phrase, there must be an exact match of the phrase in the input types. If using SSML, the phrase must not be inside a phoneme tag (entirely or partially)." +}, "ssml": { "description": "The SSML document to be synthesized. The SSML document must be valid and well-formed. Otherwise the RPC will fail and return google.rpc.Code.INVALID_ARGUMENT. 
For more information, see [SSML](https://cloud.google.com/text-to-speech/docs/ssml).", "type": "string" @@ -528,6 +688,10 @@ "description": "The top-level message sent by the client for the `SynthesizeSpeech` method.", "id": "SynthesizeSpeechRequest", "properties": { +"advancedVoiceOptions": { +"$ref": "AdvancedVoiceOptions", +"description": "Adnanced voice options." +}, "audioConfig": { "$ref": "AudioConfig", "description": "Required. The configuration of the synthesized audio." @@ -636,6 +800,17 @@ }, "type": "object" }, +"VoiceCloneParams": { +"description": "The configuration of Voice Clone feature.", +"id": "VoiceCloneParams", +"properties": { +"voiceCloningKey": { +"description": "Required. Created by GenerateVoiceCloningKey.", +"type": "string" +} +}, +"type": "object" +}, "VoiceSelectionParams": { "description": "Description of which voice to use for a synthesis request.", "id": "VoiceSelectionParams", @@ -667,6 +842,10 @@ "A gender-neutral voice. This voice is not yet supported." ], "type": "string" +}, +"voiceClone": { +"$ref": "VoiceCloneParams", +"description": "Optional. The configuration for a voice clone. If [VoiceCloneParams.voice_clone_key] is set, the service will choose the voice clone matching the specified configuration." } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/workflows.v1.json b/googleapiclient/discovery_cache/documents/workflows.v1.json index f111259f8e0..fd3edf0602b 100644 --- a/googleapiclient/discovery_cache/documents/workflows.v1.json +++ b/googleapiclient/discovery_cache/documents/workflows.v1.json @@ -485,7 +485,7 @@ } } }, -"revision": "20240814", +"revision": "20240925", "rootUrl": "https://workflows.googleapis.com/", "schemas": { "Empty": { @@ -722,7 +722,7 @@ "type": "object" }, "Workflow": { -"description": "LINT.IfChange Workflow program to be executed by Workflows.", +"description": "Workflow program to be executed by Workflows.", "id": "Workflow", "properties": { "allKmsKeys": {